2026-03-10T10:57:02.349 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-10T10:57:02.354 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T10:57:02.377 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1008
branch: squid
description: orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 1-start 2-services/nfs-ingress2 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '1008'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-10_01:00:38-orch-squid-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
  workunit:
    branch: tt-squid
    sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 8043
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
targets:
  vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC2qeTz9kgebr+/tt/WBU35lOMdGRDJIMmuqTN/zgSdMvbcqZUAsSl39SZ5oJJJVexmRqC0dL2lAVZHnwm3xvMc=
  vm07.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMjW4WkHNrk2RH3Bf4+vgLhD8tGBXm/cxTKi6xNE0HakNPWLvCr0e1pC1u7a/uu11t97iEwgCxGMzqM0Tsm92GI=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install runc nvmetcli nvme-cli -y
    - sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
    - sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- vip.exec:
    all-hosts:
    - systemctl stop nfs-server
- cephadm.shell:
    host.a:
    - ceph fs volume create foofs
    - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} --port 2999
    - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
- cephadm.wait_for_service:
    service: nfs.foo
- cephadm.wait_for_service:
    service: ingress.nfs.foo
- vip.exec:
    host.a:
    - mkdir /mnt/foo
    - sleep 5
    - mount -t nfs {{VIP0}}:/fake /mnt/foo -o port=2999
    - echo test > /mnt/foo/testfile
    - sync
- cephadm.shell:
    host.a:
    - "echo \"Check with each haproxy down in turn...\"\nfor haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do\n  ceph orch daemon stop $haproxy\n  while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done\n  cat /mnt/foo/testfile\n  echo $haproxy > /mnt/foo/testfile\n  sync\n  ceph orch daemon start $haproxy\n  while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done\ndone\n"
    volumes:
    - /mnt/foo:/mnt/foo
- vip.exec:
    all-hosts:
    - "echo \"Check with $(hostname) ganesha(s) down...\"\nfor c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`; do\n  cid=`echo $c | sed 's/@/-/'`\n  id=`echo $c | cut -d @ -f 2 | sed 's/.service$//'`\n  fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`\n  echo \"Removing daemon $id fsid $fsid...\"\n  sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id\n\n  echo \"Waking up cephadm...\"\n  sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh\n\n  while ! timeout 1 cat /mnt/foo/testfile ; do true ; done\n  echo \"Mount is back!\"\ndone\n"
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-10_01:00:38
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-10T10:57:02.377 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa; will attempt to use it
2026-03-10T10:57:02.377 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa/tasks
2026-03-10T10:57:02.377 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-10T10:57:02.377 INFO:teuthology.task.internal:Checking packages...
2026-03-10T10:57:02.378 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-10T10:57:02.378 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-10T10:57:02.378 INFO:teuthology.packaging:ref: None
2026-03-10T10:57:02.378 INFO:teuthology.packaging:tag: None
2026-03-10T10:57:02.378 INFO:teuthology.packaging:branch: squid
2026-03-10T10:57:02.378 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T10:57:02.378 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-10T10:57:03.096 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-10T10:57:03.097 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-10T10:57:03.099 INFO:teuthology.task.internal:no buildpackages task found
2026-03-10T10:57:03.099 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-10T10:57:03.101 INFO:teuthology.task.internal:Saving configuration
2026-03-10T10:57:03.106 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-10T10:57:03.107 INFO:teuthology.task.internal.check_lock:Checking locks...
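The two double-quoted block scalars in the job YAML above are hard to read through their embedded \n escapes; unescaped, they correspond to the following shell, with every command and path exactly as in the config:

    # Check with each haproxy down in turn...
    for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do
      ceph orch daemon stop $haproxy
      while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
      cat /mnt/foo/testfile
      echo $haproxy > /mnt/foo/testfile
      sync
      ceph orch daemon start $haproxy
      while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
    done

    # Check with each host's ganesha(s) down...
    for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`; do
      cid=`echo $c | sed 's/@/-/'`
      id=`echo $c | cut -d @ -f 2 | sed 's/.service$//'`
      fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`
      echo "Removing daemon $id fsid $fsid..."
      sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id
      echo "Waking up cephadm..."
      sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh
      while ! timeout 1 cat /mnt/foo/testfile ; do true ; done
      echo "Mount is back!"
    done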
2026-03-10T10:57:03.114 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm06.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1008', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 10:55:48.427666', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:06', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC2qeTz9kgebr+/tt/WBU35lOMdGRDJIMmuqTN/zgSdMvbcqZUAsSl39SZ5oJJJVexmRqC0dL2lAVZHnwm3xvMc='}
2026-03-10T10:57:03.118 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm07.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1008', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 10:55:48.427279', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:07', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMjW4WkHNrk2RH3Bf4+vgLhD8tGBXm/cxTKi6xNE0HakNPWLvCr0e1pC1u7a/uu11t97iEwgCxGMzqM0Tsm92GI='}
2026-03-10T10:57:03.118 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-10T10:57:03.119 INFO:teuthology.task.internal:roles: ubuntu@vm06.local - ['host.a', 'client.0']
2026-03-10T10:57:03.119 INFO:teuthology.task.internal:roles: ubuntu@vm07.local - ['host.b', 'client.1']
2026-03-10T10:57:03.119 INFO:teuthology.run_tasks:Running task console_log...
2026-03-10T10:57:03.126 DEBUG:teuthology.task.console_log:vm06 does not support IPMI; excluding
2026-03-10T10:57:03.132 DEBUG:teuthology.task.console_log:vm07 does not support IPMI; excluding
2026-03-10T10:57:03.132 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fd08d91e170>, signals=[15])
2026-03-10T10:57:03.132 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-10T10:57:03.133 INFO:teuthology.task.internal:Opening connections...
2026-03-10T10:57:03.133 DEBUG:teuthology.task.internal:connecting to ubuntu@vm06.local
2026-03-10T10:57:03.133 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T10:57:03.194 DEBUG:teuthology.task.internal:connecting to ubuntu@vm07.local
2026-03-10T10:57:03.194 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm07.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T10:57:03.254 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-10T10:57:03.256 DEBUG:teuthology.orchestra.run.vm06:> uname -m
2026-03-10T10:57:03.310 INFO:teuthology.orchestra.run.vm06.stdout:x86_64
2026-03-10T10:57:03.310 DEBUG:teuthology.orchestra.run.vm06:> cat /etc/os-release
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:NAME="CentOS Stream"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:VERSION="9"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:ID="centos"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:ID_LIKE="rhel fedora"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_ID="9"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:PLATFORM_ID="platform:el9"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:ANSI_COLOR="0;31"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:LOGO="fedora-logo-icon"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:HOME_URL="https://centos.org/"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-10T10:57:03.365 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-10T10:57:03.366 INFO:teuthology.lock.ops:Updating vm06.local on lock server
2026-03-10T10:57:03.371 DEBUG:teuthology.orchestra.run.vm07:> uname -m
2026-03-10T10:57:03.386 INFO:teuthology.orchestra.run.vm07.stdout:x86_64
2026-03-10T10:57:03.386 DEBUG:teuthology.orchestra.run.vm07:> cat /etc/os-release
2026-03-10T10:57:03.443 INFO:teuthology.orchestra.run.vm07.stdout:NAME="CentOS Stream"
2026-03-10T10:57:03.443 INFO:teuthology.orchestra.run.vm07.stdout:VERSION="9"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:ID="centos"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:ID_LIKE="rhel fedora"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:VERSION_ID="9"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:PLATFORM_ID="platform:el9"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:ANSI_COLOR="0;31"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:LOGO="fedora-logo-icon"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:HOME_URL="https://centos.org/"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-10T10:57:03.444 INFO:teuthology.orchestra.run.vm07.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-10T10:57:03.444 INFO:teuthology.lock.ops:Updating vm07.local on lock server
2026-03-10T10:57:03.449 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-10T10:57:03.451 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-10T10:57:03.452 INFO:teuthology.task.internal:Checking for old test directory...
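The /var/lib/ceph check a few lines below uses `test -z $(ls -A /var/lib/ceph)`, which word-splits the unquoted substitution and treats a missing directory the same as an empty one (hence the harmless `ls: cannot access` stderr). A more defensive sketch of the same intent:

    # fail only if /var/lib/ceph exists and contains anything
    if [ -d /var/lib/ceph ] && [ -n "$(sudo ls -A /var/lib/ceph)" ]; then
        echo "/var/lib/ceph is not empty" >&2
        exit 1
    fi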
2026-03-10T10:57:03.452 DEBUG:teuthology.orchestra.run.vm06:> test '!' -e /home/ubuntu/cephtest
2026-03-10T10:57:03.454 DEBUG:teuthology.orchestra.run.vm07:> test '!' -e /home/ubuntu/cephtest
2026-03-10T10:57:03.499 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-10T10:57:03.501 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-10T10:57:03.501 DEBUG:teuthology.orchestra.run.vm06:> test -z $(ls -A /var/lib/ceph)
2026-03-10T10:57:03.509 DEBUG:teuthology.orchestra.run.vm07:> test -z $(ls -A /var/lib/ceph)
2026-03-10T10:57:03.523 INFO:teuthology.orchestra.run.vm06.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T10:57:03.557 INFO:teuthology.orchestra.run.vm07.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T10:57:03.557 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-10T10:57:03.566 DEBUG:teuthology.orchestra.run.vm06:> test -e /ceph-qa-ready
2026-03-10T10:57:03.580 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T10:57:03.796 DEBUG:teuthology.orchestra.run.vm07:> test -e /ceph-qa-ready
2026-03-10T10:57:03.814 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T10:57:04.005 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-10T10:57:04.007 INFO:teuthology.task.internal:Creating test directory...
2026-03-10T10:57:04.007 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T10:57:04.009 DEBUG:teuthology.orchestra.run.vm07:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T10:57:04.028 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-10T10:57:04.029 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-10T10:57:04.030 INFO:teuthology.task.internal:Creating archive directory...
2026-03-10T10:57:04.030 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T10:57:04.065 DEBUG:teuthology.orchestra.run.vm07:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T10:57:04.088 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-10T10:57:04.089 INFO:teuthology.task.internal:Enabling coredump saving...
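The coredump step below points kernel.core_pattern at the archive directory via sysctl and appends the same setting to /etc/sysctl.conf so it survives a reboot. A quick sketch for verifying both on a node, using the same path as in the log:

    sysctl -n kernel.core_pattern                 # runtime value
    grep core_pattern /etc/sysctl.conf            # persisted value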
2026-03-10T10:57:04.089 DEBUG:teuthology.orchestra.run.vm06:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T10:57:04.137 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T10:57:04.137 DEBUG:teuthology.orchestra.run.vm07:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T10:57:04.154 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T10:57:04.154 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T10:57:04.180 DEBUG:teuthology.orchestra.run.vm07:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T10:57:04.203 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T10:57:04.211 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T10:57:04.222 INFO:teuthology.orchestra.run.vm07.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T10:57:04.233 INFO:teuthology.orchestra.run.vm07.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T10:57:04.235 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-10T10:57:04.236 INFO:teuthology.task.internal:Configuring sudo...
2026-03-10T10:57:04.236 DEBUG:teuthology.orchestra.run.vm06:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T10:57:04.255 DEBUG:teuthology.orchestra.run.vm07:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T10:57:04.303 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-10T10:57:04.305 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
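The `sudo dd of=/etc/rsyslog.d/80-cephtest.conf` below writes the rule file from stdin, so its contents never appear in the log. A plausible sketch of such a snippet, assuming the usual kernel/miscellaneous split into the two files created above (the exact selectors are an assumption):

    sudo tee /etc/rsyslog.d/80-cephtest.conf <<'EOF'
    kern.* -/home/ubuntu/cephtest/archive/syslog/kern.log
    *.*;kern.none -/home/ubuntu/cephtest/archive/syslog/misc.log
    EOF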
2026-03-10T10:57:04.305 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T10:57:04.319 DEBUG:teuthology.orchestra.run.vm07:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T10:57:04.361 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T10:57:04.395 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T10:57:04.450 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-10T10:57:04.450 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T10:57:04.509 DEBUG:teuthology.orchestra.run.vm07:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T10:57:04.534 DEBUG:teuthology.orchestra.run.vm07:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T10:57:04.594 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T10:57:04.594 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T10:57:04.656 DEBUG:teuthology.orchestra.run.vm06:> sudo service rsyslog restart
2026-03-10T10:57:04.658 DEBUG:teuthology.orchestra.run.vm07:> sudo service rsyslog restart
2026-03-10T10:57:04.687 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T10:57:04.729 INFO:teuthology.orchestra.run.vm07.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T10:57:05.027 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-10T10:57:05.029 INFO:teuthology.task.internal:Starting timer...
2026-03-10T10:57:05.029 INFO:teuthology.run_tasks:Running task pcp...
2026-03-10T10:57:05.033 INFO:teuthology.run_tasks:Running task selinux...
2026-03-10T10:57:05.035 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0']}
2026-03-10T10:57:05.036 INFO:teuthology.task.selinux:Excluding vm06: VMs are not yet supported
2026-03-10T10:57:05.036 INFO:teuthology.task.selinux:Excluding vm07: VMs are not yet supported
2026-03-10T10:57:05.036 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-10T10:57:05.036 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-10T10:57:05.036 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-10T10:57:05.036 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-10T10:57:05.037 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-10T10:57:05.038 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-10T10:57:05.039 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-10T10:57:05.532 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-10T10:57:05.538 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-10T10:57:05.538 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventoryyi18wv43 --limit vm06.local,vm07.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-10T10:59:09.861 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm06.local'), Remote(name='ubuntu@vm07.local')]
2026-03-10T10:59:09.862 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm06.local'
2026-03-10T10:59:09.862 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T10:59:09.926 DEBUG:teuthology.orchestra.run.vm06:> true
2026-03-10T10:59:10.007 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm06.local'
2026-03-10T10:59:10.007 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm07.local'
2026-03-10T10:59:10.007 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm07.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T10:59:10.074 DEBUG:teuthology.orchestra.run.vm07:> true
2026-03-10T10:59:10.145 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm07.local'
2026-03-10T10:59:10.145 INFO:teuthology.run_tasks:Running task clock...
2026-03-10T10:59:10.147 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
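On these CentOS Stream 9 nodes only chronyd is installed, so every ntp/ntpd branch of the fallback chain below fails by design, and the `506 Cannot talk to daemon` comes from running `chronyc makestep` while chronyd is stopped. The effective portion of the chain reduces to this sketch:

    sudo systemctl stop chronyd.service
    sudo chronyc makestep          # fails with '506 Cannot talk to daemon' while chronyd is down
    sudo systemctl start chronyd.service
    chronyc sources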
2026-03-10T10:59:10.148 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T10:59:10.148 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T10:59:10.149 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T10:59:10.149 DEBUG:teuthology.orchestra.run.vm07:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T10:59:10.184 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-10T10:59:10.201 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-10T10:59:10.224 INFO:teuthology.orchestra.run.vm07.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-10T10:59:10.232 INFO:teuthology.orchestra.run.vm06.stderr:sudo: ntpd: command not found
2026-03-10T10:59:10.239 INFO:teuthology.orchestra.run.vm07.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-10T10:59:10.247 INFO:teuthology.orchestra.run.vm06.stdout:506 Cannot talk to daemon
2026-03-10T10:59:10.269 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-10T10:59:10.274 INFO:teuthology.orchestra.run.vm07.stderr:sudo: ntpd: command not found
2026-03-10T10:59:10.289 INFO:teuthology.orchestra.run.vm07.stdout:506 Cannot talk to daemon
2026-03-10T10:59:10.291 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-10T10:59:10.310 INFO:teuthology.orchestra.run.vm07.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-10T10:59:10.328 INFO:teuthology.orchestra.run.vm07.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-10T10:59:10.341 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found
2026-03-10T10:59:10.343 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T10:59:10.343 INFO:teuthology.orchestra.run.vm06.stdout:===============================================================================
2026-03-10T10:59:10.382 INFO:teuthology.orchestra.run.vm07.stderr:bash: line 1: ntpq: command not found
2026-03-10T10:59:10.384 INFO:teuthology.orchestra.run.vm07.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T10:59:10.384 INFO:teuthology.orchestra.run.vm07.stdout:===============================================================================
2026-03-10T10:59:10.384 INFO:teuthology.run_tasks:Running task pexec...
2026-03-10T10:59:10.387 INFO:teuthology.task.pexec:Executing custom commands...
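The pexec commands that follow flip the podman OCI runtime from crun to runc by editing /usr/share/containers/containers.conf. One way to confirm afterwards that the switch took effect (a sketch):

    sudo podman info --format '{{.Host.OCIRuntime.Name}}'   # expect: runc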
2026-03-10T10:59:10.387 DEBUG:teuthology.orchestra.run.vm06:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-10T10:59:10.387 DEBUG:teuthology.orchestra.run.vm07:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-10T10:59:10.389 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf remove nvme-cli -y
2026-03-10T10:59:10.389 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf install runc nvmetcli nvme-cli -y
2026-03-10T10:59:10.389 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-10T10:59:10.389 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-10T10:59:10.389 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm06.local
2026-03-10T10:59:10.389 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-10T10:59:10.389 INFO:teuthology.task.pexec:sudo dnf install runc nvmetcli nvme-cli -y
2026-03-10T10:59:10.389 INFO:teuthology.task.pexec:sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-10T10:59:10.389 INFO:teuthology.task.pexec:sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-10T10:59:10.429 DEBUG:teuthology.task.pexec:ubuntu@vm07.local< sudo dnf remove nvme-cli -y
2026-03-10T10:59:10.429 DEBUG:teuthology.task.pexec:ubuntu@vm07.local< sudo dnf install runc nvmetcli nvme-cli -y
2026-03-10T10:59:10.429 DEBUG:teuthology.task.pexec:ubuntu@vm07.local< sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-10T10:59:10.429 DEBUG:teuthology.task.pexec:ubuntu@vm07.local< sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-10T10:59:10.429 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm07.local
2026-03-10T10:59:10.429 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-10T10:59:10.429 INFO:teuthology.task.pexec:sudo dnf install runc nvmetcli nvme-cli -y
2026-03-10T10:59:10.429 INFO:teuthology.task.pexec:sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-10T10:59:10.429 INFO:teuthology.task.pexec:sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-10T10:59:10.649 INFO:teuthology.orchestra.run.vm06.stdout:No match for argument: nvme-cli
2026-03-10T10:59:10.649 INFO:teuthology.orchestra.run.vm06.stderr:No packages marked for removal.
2026-03-10T10:59:10.652 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved.
2026-03-10T10:59:10.653 INFO:teuthology.orchestra.run.vm06.stdout:Nothing to do.
2026-03-10T10:59:10.653 INFO:teuthology.orchestra.run.vm06.stdout:Complete!
2026-03-10T10:59:10.656 INFO:teuthology.orchestra.run.vm07.stdout:No match for argument: nvme-cli
2026-03-10T10:59:10.656 INFO:teuthology.orchestra.run.vm07.stderr:No packages marked for removal.
2026-03-10T10:59:10.660 INFO:teuthology.orchestra.run.vm07.stdout:Dependencies resolved.
2026-03-10T10:59:10.660 INFO:teuthology.orchestra.run.vm07.stdout:Nothing to do.
2026-03-10T10:59:10.660 INFO:teuthology.orchestra.run.vm07.stdout:Complete!
2026-03-10T10:59:11.141 INFO:teuthology.orchestra.run.vm07.stdout:Last metadata expiration check: 0:01:04 ago on Tue 10 Mar 2026 10:58:07 AM UTC.
2026-03-10T10:59:11.175 INFO:teuthology.orchestra.run.vm06.stdout:Last metadata expiration check: 0:01:18 ago on Tue 10 Mar 2026 10:57:53 AM UTC.
2026-03-10T10:59:11.263 INFO:teuthology.orchestra.run.vm07.stdout:Dependencies resolved.
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:================================================================================
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout: Package Arch Version Repository Size
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:================================================================================
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:Installing:
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout: runc x86_64 4:1.4.0-2.el9 appstream 4.0 M
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:Installing dependencies:
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:Transaction Summary
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:================================================================================
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:Install 7 Packages
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:Total download size: 6.3 M
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:Installed size: 24 M
2026-03-10T10:59:11.264 INFO:teuthology.orchestra.run.vm07.stdout:Downloading Packages:
2026-03-10T10:59:11.300 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved.
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout: Package Arch Version Repository Size
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:Installing:
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout: runc x86_64 4:1.4.0-2.el9 appstream 4.0 M
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:Installing dependencies:
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:Transaction Summary
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:Install 7 Packages
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:Total download size: 6.3 M
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:Installed size: 24 M
2026-03-10T10:59:11.301 INFO:teuthology.orchestra.run.vm06.stdout:Downloading Packages:
2026-03-10T10:59:11.650 INFO:teuthology.orchestra.run.vm07.stdout:(1/7): nvmetcli-0.8-3.el9.noarch.rpm 256 kB/s | 44 kB 00:00
2026-03-10T10:59:11.660 INFO:teuthology.orchestra.run.vm07.stdout:(2/7): python3-configshell-1.1.30-1.el9.noarch. 398 kB/s | 72 kB 00:00
2026-03-10T10:59:11.707 INFO:teuthology.orchestra.run.vm07.stdout:(3/7): python3-kmod-0.9-32.el9.x86_64.rpm 1.5 MB/s | 84 kB 00:00
2026-03-10T10:59:11.724 INFO:teuthology.orchestra.run.vm07.stdout:(4/7): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.3 MB/s | 150 kB 00:00
2026-03-10T10:59:11.754 INFO:teuthology.orchestra.run.vm07.stdout:(5/7): nvme-cli-2.16-1.el9.x86_64.rpm 4.2 MB/s | 1.2 MB 00:00
2026-03-10T10:59:11.801 INFO:teuthology.orchestra.run.vm07.stdout:(6/7): python3-urwid-2.1.2-4.el9.x86_64.rpm 8.8 MB/s | 837 kB 00:00
2026-03-10T10:59:12.012 INFO:teuthology.orchestra.run.vm07.stdout:(7/7): runc-1.4.0-2.el9.x86_64.rpm 14 MB/s | 4.0 MB 00:00
2026-03-10T10:59:12.013 INFO:teuthology.orchestra.run.vm07.stdout:--------------------------------------------------------------------------------
2026-03-10T10:59:12.013 INFO:teuthology.orchestra.run.vm07.stdout:Total 8.4 MB/s | 6.3 MB 00:00
2026-03-10T10:59:12.117 INFO:teuthology.orchestra.run.vm07.stdout:Running transaction check
2026-03-10T10:59:12.129 INFO:teuthology.orchestra.run.vm07.stdout:Transaction check succeeded.
2026-03-10T10:59:12.129 INFO:teuthology.orchestra.run.vm07.stdout:Running transaction test
2026-03-10T10:59:12.215 INFO:teuthology.orchestra.run.vm07.stdout:Transaction test succeeded.
2026-03-10T10:59:12.215 INFO:teuthology.orchestra.run.vm07.stdout:Running transaction
2026-03-10T10:59:12.252 INFO:teuthology.orchestra.run.vm06.stdout:(1/7): nvmetcli-0.8-3.el9.noarch.rpm 108 kB/s | 44 kB 00:00
2026-03-10T10:59:12.263 INFO:teuthology.orchestra.run.vm06.stdout:(2/7): python3-configshell-1.1.30-1.el9.noarch. 173 kB/s | 72 kB 00:00
2026-03-10T10:59:12.431 INFO:teuthology.orchestra.run.vm07.stdout: Preparing : 1/1
2026-03-10T10:59:12.445 INFO:teuthology.orchestra.run.vm07.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/7
2026-03-10T10:59:12.452 INFO:teuthology.orchestra.run.vm06.stdout:(3/7): python3-kmod-0.9-32.el9.x86_64.rpm 421 kB/s | 84 kB 00:00
2026-03-10T10:59:12.460 INFO:teuthology.orchestra.run.vm07.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/7
2026-03-10T10:59:12.467 INFO:teuthology.orchestra.run.vm06.stdout:(4/7): python3-pyparsing-2.4.7-9.el9.noarch.rpm 737 kB/s | 150 kB 00:00
2026-03-10T10:59:12.471 INFO:teuthology.orchestra.run.vm07.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-10T10:59:12.479 INFO:teuthology.orchestra.run.vm07.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-10T10:59:12.481 INFO:teuthology.orchestra.run.vm07.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/7
2026-03-10T10:59:12.518 INFO:teuthology.orchestra.run.vm06.stdout:(5/7): nvme-cli-2.16-1.el9.x86_64.rpm 1.7 MB/s | 1.2 MB 00:00
2026-03-10T10:59:12.546 INFO:teuthology.orchestra.run.vm07.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/7
2026-03-10T10:59:12.608 INFO:teuthology.orchestra.run.vm06.stdout:(6/7): python3-urwid-2.1.2-4.el9.x86_64.rpm 5.2 MB/s | 837 kB 00:00
2026-03-10T10:59:12.712 INFO:teuthology.orchestra.run.vm07.stdout: Installing : runc-4:1.4.0-2.el9.x86_64 6/7
2026-03-10T10:59:12.717 INFO:teuthology.orchestra.run.vm07.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-10T10:59:12.818 INFO:teuthology.orchestra.run.vm06.stdout:(7/7): runc-1.4.0-2.el9.x86_64.rpm 11 MB/s | 4.0 MB 00:00
2026-03-10T10:59:12.820 INFO:teuthology.orchestra.run.vm06.stdout:--------------------------------------------------------------------------------
2026-03-10T10:59:12.820 INFO:teuthology.orchestra.run.vm06.stdout:Total 4.1 MB/s | 6.3 MB 00:01
2026-03-10T10:59:12.920 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction check
2026-03-10T10:59:12.929 INFO:teuthology.orchestra.run.vm06.stdout:Transaction check succeeded.
2026-03-10T10:59:12.929 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction test
2026-03-10T10:59:13.011 INFO:teuthology.orchestra.run.vm06.stdout:Transaction test succeeded.
2026-03-10T10:59:13.012 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction
2026-03-10T10:59:13.168 INFO:teuthology.orchestra.run.vm07.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-10T10:59:13.168 INFO:teuthology.orchestra.run.vm07.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-10T10:59:13.168 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T10:59:13.234 INFO:teuthology.orchestra.run.vm06.stdout: Preparing : 1/1
2026-03-10T10:59:13.248 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/7
2026-03-10T10:59:13.264 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/7
2026-03-10T10:59:13.272 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-10T10:59:13.284 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-10T10:59:13.285 INFO:teuthology.orchestra.run.vm06.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/7
2026-03-10T10:59:13.361 INFO:teuthology.orchestra.run.vm06.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/7
2026-03-10T10:59:13.515 INFO:teuthology.orchestra.run.vm06.stdout: Installing : runc-4:1.4.0-2.el9.x86_64 6/7
2026-03-10T10:59:13.522 INFO:teuthology.orchestra.run.vm06.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-10T10:59:13.909 INFO:teuthology.orchestra.run.vm06.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-10T10:59:13.909 INFO:teuthology.orchestra.run.vm06.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-10T10:59:13.909 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T10:59:13.913 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/7
2026-03-10T10:59:13.913 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/7
2026-03-10T10:59:13.913 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-10T10:59:13.913 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-10T10:59:13.913 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/7
2026-03-10T10:59:13.913 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/7
2026-03-10T10:59:14.010 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : runc-4:1.4.0-2.el9.x86_64 7/7
2026-03-10T10:59:14.010 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T10:59:14.010 INFO:teuthology.orchestra.run.vm07.stdout:Installed:
2026-03-10T10:59:14.010 INFO:teuthology.orchestra.run.vm07.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-10T10:59:14.010 INFO:teuthology.orchestra.run.vm07.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-10T10:59:14.010 INFO:teuthology.orchestra.run.vm07.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-10T10:59:14.010 INFO:teuthology.orchestra.run.vm07.stdout: runc-4:1.4.0-2.el9.x86_64
2026-03-10T10:59:14.010 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T10:59:14.010 INFO:teuthology.orchestra.run.vm07.stdout:Complete!
2026-03-10T10:59:14.214 DEBUG:teuthology.parallel:result is None
2026-03-10T10:59:14.436 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/7
2026-03-10T10:59:14.436 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/7
2026-03-10T10:59:14.436 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-10T10:59:14.436 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-10T10:59:14.436 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/7
2026-03-10T10:59:14.436 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/7
2026-03-10T10:59:14.545 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : runc-4:1.4.0-2.el9.x86_64 7/7
2026-03-10T10:59:14.545 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T10:59:14.545 INFO:teuthology.orchestra.run.vm06.stdout:Installed:
2026-03-10T10:59:14.545 INFO:teuthology.orchestra.run.vm06.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-10T10:59:14.545 INFO:teuthology.orchestra.run.vm06.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-10T10:59:14.545 INFO:teuthology.orchestra.run.vm06.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-10T10:59:14.545 INFO:teuthology.orchestra.run.vm06.stdout: runc-4:1.4.0-2.el9.x86_64
2026-03-10T10:59:14.545 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T10:59:14.545 INFO:teuthology.orchestra.run.vm06.stdout:Complete!
2026-03-10T10:59:14.660 DEBUG:teuthology.parallel:result is None
2026-03-10T10:59:14.660 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-10T10:59:14.713 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'}
2026-03-10T10:59:14.713 INFO:tasks.cephadm:Cluster image is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T10:59:14.713 INFO:tasks.cephadm:Cluster fsid is 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T10:59:14.713 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-10T10:59:14.713 INFO:tasks.cephadm:No mon roles; fabricating mons
2026-03-10T10:59:14.713 INFO:tasks.cephadm:Monitor IPs: {'mon.vm06': '192.168.123.106', 'mon.vm07': '192.168.123.107'}
2026-03-10T10:59:14.713 INFO:tasks.cephadm:Normalizing hostnames...
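The `sudo hostname $(hostname -s)` below drops the domain suffix, presumably so the hostnames cephadm registers match the short form. For comparison (a sketch):

    hostname -f   # fully qualified, e.g. vm06.local
    hostname -s   # short form used after normalization, e.g. vm06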
2026-03-10T10:59:14.713 DEBUG:teuthology.orchestra.run.vm06:> sudo hostname $(hostname -s)
2026-03-10T10:59:14.753 DEBUG:teuthology.orchestra.run.vm07:> sudo hostname $(hostname -s)
2026-03-10T10:59:14.790 INFO:tasks.cephadm:Downloading "compiled" cephadm from chacra
2026-03-10T10:59:14.790 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T10:59:15.379 INFO:tasks.cephadm:builder_project result: [{'url': 'https://3.chacra.ceph.com/r/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'chacra_url': 'https://3.chacra.ceph.com/repos/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'ref': 'squid', 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df', 'distro': 'centos', 'distro_version': '9', 'distro_codename': None, 'modified': '2026-02-25 18:55:15.146628', 'status': 'ready', 'flavor': 'default', 'project': 'ceph', 'archs': ['source', 'x86_64'], 'extra': {'version': '19.2.3-678-ge911bdeb', 'package_manager_version': '19.2.3-678.ge911bdeb', 'build_url': 'https://jenkins.ceph.com/job/ceph-dev-pipeline/3275/', 'root_build_cause': '', 'node_name': '10.20.192.26+soko16', 'job_name': 'ceph-dev-pipeline'}}]
2026-03-10T10:59:15.972 INFO:tasks.util.chacra:got chacra host 3.chacra.ceph.com, ref squid, sha1 e911bdebe5c8faa3800735d1568fcdca65db60df from https://shaman.ceph.com/api/search/?project=ceph&distros=centos%2F9%2Fx86_64&flavor=default&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T10:59:15.973 INFO:tasks.cephadm:Discovered chacra url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm
2026-03-10T10:59:15.973 INFO:tasks.cephadm:Downloading cephadm from url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm
2026-03-10T10:59:15.973 DEBUG:teuthology.orchestra.run.vm06:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T10:59:17.338 INFO:teuthology.orchestra.run.vm06.stdout:-rw-r--r--. 1 ubuntu ubuntu 788355 Mar 10 10:59 /home/ubuntu/cephtest/cephadm
2026-03-10T10:59:17.338 DEBUG:teuthology.orchestra.run.vm07:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T10:59:18.773 INFO:teuthology.orchestra.run.vm07.stdout:-rw-r--r--. 1 ubuntu ubuntu 788355 Mar 10 10:59 /home/ubuntu/cephtest/cephadm
2026-03-10T10:59:18.774 DEBUG:teuthology.orchestra.run.vm06:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T10:59:18.793 DEBUG:teuthology.orchestra.run.vm07:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T10:59:18.826 INFO:tasks.cephadm:Pulling image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on all hosts...
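The `test -s ... && test $(stat -c%s ...) -gt 1000` guard above rejects an empty or implausibly small download (for example an HTML error page saved by curl -L) before the binary is marked executable. As a standalone sketch:

    f=/home/ubuntu/cephtest/cephadm
    [ -s "$f" ] && [ "$(stat -c%s "$f")" -gt 1000 ] && chmod +x "$f"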
2026-03-10T10:59:18.826 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull
2026-03-10T10:59:18.838 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull
2026-03-10T10:59:19.019 INFO:teuthology.orchestra.run.vm06.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df...
2026-03-10T10:59:19.077 INFO:teuthology.orchestra.run.vm07.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df...
2026-03-10T11:00:09.616 INFO:teuthology.orchestra.run.vm06.stdout:{
2026-03-10T11:00:09.617 INFO:teuthology.orchestra.run.vm06.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)",
2026-03-10T11:00:09.617 INFO:teuthology.orchestra.run.vm06.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c",
2026-03-10T11:00:09.617 INFO:teuthology.orchestra.run.vm06.stdout: "repo_digests": [
2026-03-10T11:00:09.617 INFO:teuthology.orchestra.run.vm06.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc"
2026-03-10T11:00:09.617 INFO:teuthology.orchestra.run.vm06.stdout: ]
2026-03-10T11:00:09.617 INFO:teuthology.orchestra.run.vm06.stdout:}
2026-03-10T11:00:09.629 INFO:teuthology.orchestra.run.vm07.stdout:{
2026-03-10T11:00:09.630 INFO:teuthology.orchestra.run.vm07.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)",
2026-03-10T11:00:09.630 INFO:teuthology.orchestra.run.vm07.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c",
2026-03-10T11:00:09.630 INFO:teuthology.orchestra.run.vm07.stdout: "repo_digests": [
2026-03-10T11:00:09.630 INFO:teuthology.orchestra.run.vm07.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc"
2026-03-10T11:00:09.630 INFO:teuthology.orchestra.run.vm07.stdout: ]
2026-03-10T11:00:09.630 INFO:teuthology.orchestra.run.vm07.stdout:}
2026-03-10T11:00:09.651 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /etc/ceph
2026-03-10T11:00:09.678 DEBUG:teuthology.orchestra.run.vm07:> sudo mkdir -p /etc/ceph
2026-03-10T11:00:09.705 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 777 /etc/ceph
2026-03-10T11:00:09.742 DEBUG:teuthology.orchestra.run.vm07:> sudo chmod 777 /etc/ceph
2026-03-10T11:00:09.769 INFO:tasks.cephadm:Writing seed config...
2026-03-10T11:00:09.770 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-10T11:00:09.770 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-10T11:00:09.770 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-10T11:00:09.770 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-10T11:00:09.770 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-10T11:00:09.770 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-10T11:00:09.770 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-10T11:00:09.770 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-10T11:00:09.770 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-03-10T11:00:09.770 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-10T11:00:09.770 DEBUG:teuthology.orchestra.run.vm06:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-10T11:00:09.798 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000    # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000
[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660    # 11m
auth service ticket ttl = 240    # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20
[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
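Each section of the seed config above ends with the per-daemon overrides carried over from the job YAML (the debug levels, the mclock threshold, the pgref assert). To spot-check a value in the written file, a grep-style sketch:

    # print 'debug paxos' as set in the [mon] section of the seed config
    awk '/^\[mon\]/{s=1;next} /^\[/{s=0} s && /debug paxos/' /home/ubuntu/cephtest/seed.ceph.conf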
2026-03-10T11:00:09.798 DEBUG:teuthology.orchestra.run.vm06:mon.vm06> sudo journalctl -f -n 0 -u ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm06.service
2026-03-10T11:00:09.841 INFO:tasks.cephadm:Bootstrapping...
2026-03-10T11:00:09.841 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df -v bootstrap --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.106 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-10T11:00:09.980 INFO:teuthology.orchestra.run.vm06.stdout:--------------------------------------------------------------------------------
2026-03-10T11:00:09.980 INFO:teuthology.orchestra.run.vm06.stdout:cephadm ['--image', 'quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df', '-v', 'bootstrap', '--fsid', '2d4d1532-1c70-11f1-9ee5-8d2ac270c240', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.106', '--skip-admin-label']
2026-03-10T11:00:09.980 INFO:teuthology.orchestra.run.vm06.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts.
2026-03-10T11:00:09.980 INFO:teuthology.orchestra.run.vm06.stdout:Verifying podman|docker is present...
2026-03-10T11:00:09.998 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stdout 5.8.0
2026-03-10T11:00:09.998 INFO:teuthology.orchestra.run.vm06.stdout:Verifying lvm2 is present...
2026-03-10T11:00:09.998 INFO:teuthology.orchestra.run.vm06.stdout:Verifying time synchronization is in place...
2026-03-10T11:00:10.005 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-10T11:00:10.005 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T11:00:10.011 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-10T11:00:10.011 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive
2026-03-10T11:00:10.017 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout enabled
2026-03-10T11:00:10.023 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout active
2026-03-10T11:00:10.023 INFO:teuthology.orchestra.run.vm06.stdout:Unit chronyd.service is enabled and running
2026-03-10T11:00:10.023 INFO:teuthology.orchestra.run.vm06.stdout:Repeating the final host check...
2026-03-10T11:00:10.043 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stdout 5.8.0
2026-03-10T11:00:10.043 INFO:teuthology.orchestra.run.vm06.stdout:podman (/bin/podman) version 5.8.0 is present
2026-03-10T11:00:10.043 INFO:teuthology.orchestra.run.vm06.stdout:systemctl is present
2026-03-10T11:00:10.043 INFO:teuthology.orchestra.run.vm06.stdout:lvcreate is present
2026-03-10T11:00:10.049 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-10T11:00:10.049 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T11:00:10.056 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-10T11:00:10.056 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive
2026-03-10T11:00:10.062 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout enabled
2026-03-10T11:00:10.068 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout active
2026-03-10T11:00:10.068 INFO:teuthology.orchestra.run.vm06.stdout:Unit chronyd.service is enabled and running
2026-03-10T11:00:10.068 INFO:teuthology.orchestra.run.vm06.stdout:Host looks OK
2026-03-10T11:00:10.068 INFO:teuthology.orchestra.run.vm06.stdout:Cluster fsid: 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:10.068 INFO:teuthology.orchestra.run.vm06.stdout:Acquiring lock 140635482027920 on /run/cephadm/2d4d1532-1c70-11f1-9ee5-8d2ac270c240.lock
2026-03-10T11:00:10.068 INFO:teuthology.orchestra.run.vm06.stdout:Lock 140635482027920 acquired on /run/cephadm/2d4d1532-1c70-11f1-9ee5-8d2ac270c240.lock
2026-03-10T11:00:10.068 INFO:teuthology.orchestra.run.vm06.stdout:Verifying IP 192.168.123.106 port 3300 ...
2026-03-10T11:00:10.068 INFO:teuthology.orchestra.run.vm06.stdout:Verifying IP 192.168.123.106 port 6789 ...
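[Editor's note] The pre-flight host check above (container engine, systemctl, lvcreate, time sync) is also exposed as a standalone cephadm subcommand; a sketch, assuming the same cephadm binary used by this job:

    # Hypothetical manual re-run of the checks bootstrap just repeated.
    sudo /home/ubuntu/cephtest/cephadm check-host
    systemctl is-enabled chronyd.service   # chrony.service is absent here; chronyd.service is the active unit
    podman --version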
2026-03-10T11:00:10.068 INFO:teuthology.orchestra.run.vm06.stdout:Base mon IP(s) is [192.168.123.106:3300, 192.168.123.106:6789], mon addrv is [v2:192.168.123.106:3300,v1:192.168.123.106:6789]
2026-03-10T11:00:10.071 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.106 metric 100
2026-03-10T11:00:10.071 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.106 metric 100
2026-03-10T11:00:10.074 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium
2026-03-10T11:00:10.074 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-03-10T11:00:10.076 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000
2026-03-10T11:00:10.076 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout inet6 ::1/128 scope host
2026-03-10T11:00:10.076 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-10T11:00:10.076 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000
2026-03-10T11:00:10.076 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:6/64 scope link noprefixroute
2026-03-10T11:00:10.076 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-10T11:00:10.077 INFO:teuthology.orchestra.run.vm06.stdout:Mon IP `192.168.123.106` is in CIDR network `192.168.123.0/24`
2026-03-10T11:00:10.077 INFO:teuthology.orchestra.run.vm06.stdout:Mon IP `192.168.123.106` is in CIDR network `192.168.123.0/24`
2026-03-10T11:00:10.077 INFO:teuthology.orchestra.run.vm06.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24']
2026-03-10T11:00:10.077 INFO:teuthology.orchestra.run.vm06.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-10T11:00:10.077 INFO:teuthology.orchestra.run.vm06.stdout:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df...
2026-03-10T11:00:11.291 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stdout 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c
2026-03-10T11:00:11.292 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Trying to pull quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df...
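[Editor's note] Bootstrap infers public_network from the host's routing table, as the /sbin/ip output above shows. A sketch of the same lookup by hand (the eth0 device name is taken from this log):

    ip route get 192.168.123.106   # interface and source address for the mon IP
    ip -o -4 addr show dev eth0    # the /24 the mon IP falls in

If OSD replication should use a separate network, bootstrap accepts --cluster-network <CIDR>; this run did not pass it, so replication defaults to the public network, as the message above notes.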
2026-03-10T11:00:11.292 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Getting image source signatures
2026-03-10T11:00:11.292 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Copying blob sha256:1752b8d01aa0dd33bbe0ab24e8316174c94fbdcd5d26252e2680bba0624747a7
2026-03-10T11:00:11.292 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Copying blob sha256:8e380faede39ebd4286247457b408d979ab568aafd8389c42ec304b8cfba4e92
2026-03-10T11:00:11.292 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Copying config sha256:654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c
2026-03-10T11:00:11.292 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Writing manifest to image destination
2026-03-10T11:00:11.557 INFO:teuthology.orchestra.run.vm06.stdout:ceph: stdout ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)
2026-03-10T11:00:11.557 INFO:teuthology.orchestra.run.vm06.stdout:Ceph version: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)
2026-03-10T11:00:11.557 INFO:teuthology.orchestra.run.vm06.stdout:Extracting ceph user uid/gid from container image...
2026-03-10T11:00:11.754 INFO:teuthology.orchestra.run.vm06.stdout:stat: stdout 167 167
2026-03-10T11:00:11.755 INFO:teuthology.orchestra.run.vm06.stdout:Creating initial keys...
2026-03-10T11:00:11.979 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-authtool: stdout AQC7+a9pYisNMxAABPLzzcrKUDjEby4xZGtvJQ==
2026-03-10T11:00:12.210 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-authtool: stdout AQC8+a9pSkhABBAAhvs9t3feXrBySAh7rDV4dw==
2026-03-10T11:00:12.449 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-authtool: stdout AQC8+a9pa6kiEhAAgiPT80OL4KsOIXTikd+vsA==
2026-03-10T11:00:12.450 INFO:teuthology.orchestra.run.vm06.stdout:Creating initial monmap...
2026-03-10T11:00:12.662 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T11:00:12.662 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy
2026-03-10T11:00:12.663 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:12.663 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T11:00:12.663 INFO:teuthology.orchestra.run.vm06.stdout:monmaptool for vm06 [v2:192.168.123.106:3300,v1:192.168.123.106:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T11:00:12.663 INFO:teuthology.orchestra.run.vm06.stdout:setting min_mon_release = quincy
2026-03-10T11:00:12.663 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: set fsid to 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:12.663 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T11:00:12.663 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:12.663 INFO:teuthology.orchestra.run.vm06.stdout:Creating mon...
2026-03-10T11:00:12.893 INFO:teuthology.orchestra.run.vm06.stdout:create mon.vm06 on
2026-03-10T11:00:13.183 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
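[Editor's note] The monmap here is built inside the container by monmaptool. A standalone sketch that would produce an equivalent epoch-0 map (flags assumed from the logged output, not copied from the run):

    monmaptool --create --clobber \
        --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 \
        --addv vm06 '[v2:192.168.123.106:3300,v1:192.168.123.106:6789]' \
        /tmp/monmap
    monmaptool --print /tmp/monmap   # verify the fsid and the single mon entry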
2026-03-10T11:00:13.310 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240.target → /etc/systemd/system/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240.target. 2026-03-10T11:00:13.310 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240.target → /etc/systemd/system/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240.target. 2026-03-10T11:00:13.459 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm06 2026-03-10T11:00:13.459 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to reset failed state of unit ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm06.service: Unit ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm06.service not loaded. 2026-03-10T11:00:13.587 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240.target.wants/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm06.service → /etc/systemd/system/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@.service. 2026-03-10T11:00:13.732 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:13 vm06 podman[49115]: 2026-03-10 11:00:13.696167553 +0000 UTC m=+0.015709274 container create 9a2ed74940358073b6e7d1a10d78267f18ed04465c4056dff3ee0f51401985a7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid) 2026-03-10T11:00:13.749 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present 2026-03-10T11:00:13.749 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to enable service . firewalld.service is not available 2026-03-10T11:00:13.749 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mon to start... 2026-03-10T11:00:13.749 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mon... 
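[Editor's note] Bootstrap now polls until the new mon answers. A hypothetical wait loop in the same spirit as the "Waiting for mon..." messages above:

    # Poll until the mon responds, then print a one-shot status.
    until sudo cephadm shell -- ceph -s >/dev/null 2>&1; do sleep 1; done
    sudo cephadm shell -- ceph status --format json-pretty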
2026-03-10T11:00:14.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:13 vm06 podman[49115]: 2026-03-10 11:00:13.733273493 +0000 UTC m=+0.052815224 container init 9a2ed74940358073b6e7d1a10d78267f18ed04465c4056dff3ee0f51401985a7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06, org.label-schema.build-date=20260223, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T11:00:14.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:13 vm06 podman[49115]: 2026-03-10 11:00:13.737805075 +0000 UTC m=+0.057346796 container start 9a2ed74940358073b6e7d1a10d78267f18ed04465c4056dff3ee0f51401985a7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3) 2026-03-10T11:00:14.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:13 vm06 bash[49115]: 9a2ed74940358073b6e7d1a10d78267f18ed04465c4056dff3ee0f51401985a7 2026-03-10T11:00:14.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:13 vm06 podman[49115]: 2026-03-10 11:00:13.689589902 +0000 UTC m=+0.009131632 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T11:00:14.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:13 vm06 systemd[1]: Started Ceph mon.vm06 for 2d4d1532-1c70-11f1-9ee5-8d2ac270c240. 
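[Editor's note] The journalctl stream above records the mon container's create/init/start lifecycle under podman. A sketch for inspecting that container directly (the container name is taken from the log; the container ID changes after the restart later in this run):

    sudo podman ps --filter name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06
    sudo podman inspect --format '{{ .State.Status }}' \
        ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06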
2026-03-10T11:00:14.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:13 vm06 ceph-mon[49148]: mkfs 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:14.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:13 vm06 ceph-mon[49148]: mon.vm06 is new leader, mons vm06 in quorum (ranks 0)
2026-03-10T11:00:14.052 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout cluster:
2026-03-10T11:00:14.052 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout   id: 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:14.052 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout   health: HEALTH_OK
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout services:
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout   mon: 1 daemons, quorum vm06 (age 0.146683s)
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout   mgr: no daemons active
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout   osd: 0 osds: 0 up, 0 in
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout data:
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout   pools: 0 pools, 0 pgs
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout   objects: 0 objects, 0 B
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout   usage: 0 B used, 0 B / 0 B avail
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout   pgs:
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:mon is available
2026-03-10T11:00:14.053 INFO:teuthology.orchestra.run.vm06.stdout:Assimilating anything we can from ceph.conf...
2026-03-10T11:00:14.366 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-03-10T11:00:14.366 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [global]
2026-03-10T11:00:14.366 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout fsid = 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:14.366 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-03-10T11:00:14.366 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.106:3300,v1:192.168.123.106:6789]
2026-03-10T11:00:14.366 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-03-10T11:00:14.366 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-03-10T11:00:14.366 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-03-10T11:00:14.366 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-03-10T11:00:14.367 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-03-10T11:00:14.367 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [mgr]
2026-03-10T11:00:14.367 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-03-10T11:00:14.367 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-03-10T11:00:14.367 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [osd]
2026-03-10T11:00:14.367 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-03-10T11:00:14.367 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-03-10T11:00:14.367 INFO:teuthology.orchestra.run.vm06.stdout:Generating new minimal ceph.conf...
2026-03-10T11:00:14.694 INFO:teuthology.orchestra.run.vm06.stdout:Restarting the monitor...
2026-03-10T11:00:15.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:14 vm06 systemd[1]: Stopping Ceph mon.vm06 for 2d4d1532-1c70-11f1-9ee5-8d2ac270c240...
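[Editor's note] The assimilate/minimal-conf step above is available on the CLI as well; a sketch of the equivalent commands (file paths here are illustrative):

    # Move conf-file options into the mon config database, then emit
    # the minimal conf of the kind bootstrap writes to /etc/ceph/ceph.conf.
    ceph config assimilate-conf -i /etc/ceph/ceph.conf -o /tmp/leftover.conf
    ceph config generate-minimal-conf > /tmp/minimal.conf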
2026-03-10T11:00:15.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:14 vm06 ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06[49125]: 2026-03-10T11:00:14.775+0000 7fdb44f6e640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm06 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T11:00:15.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:14 vm06 ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06[49125]: 2026-03-10T11:00:14.775+0000 7fdb44f6e640 -1 mon.vm06@0(leader) e1 *** Got Signal Terminated *** 2026-03-10T11:00:15.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:14 vm06 podman[49419]: 2026-03-10 11:00:14.908309693 +0000 UTC m=+0.147663530 container died 9a2ed74940358073b6e7d1a10d78267f18ed04465c4056dff3ee0f51401985a7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06, OSD_FLAVOR=default, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T11:00:15.239 INFO:teuthology.orchestra.run.vm06.stdout:Setting public_network to 192.168.123.0/24 in mon config section 2026-03-10T11:00:15.293 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 podman[49419]: 2026-03-10 11:00:15.039934592 +0000 UTC m=+0.279288419 container remove 9a2ed74940358073b6e7d1a10d78267f18ed04465c4056dff3ee0f51401985a7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 bash[49419]: ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 systemd[1]: ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm06.service: Deactivated successfully. 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 systemd[1]: Stopped Ceph mon.vm06 for 2d4d1532-1c70-11f1-9ee5-8d2ac270c240. 
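[Editor's note] Setting public_network, as bootstrap does above, is a one-liner against the config database; a sketch:

    ceph config set mon public_network 192.168.123.0/24
    ceph config get mon public_network   # read it back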
2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 systemd[1]: Starting Ceph mon.vm06 for 2d4d1532-1c70-11f1-9ee5-8d2ac270c240... 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 podman[49499]: 2026-03-10 11:00:15.195186294 +0000 UTC m=+0.020067653 container create 5c495012543afbefb6c8ac026cbada1dd0dc5c380bdd599a02bc05bc659a39ba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 podman[49499]: 2026-03-10 11:00:15.222390861 +0000 UTC m=+0.047272231 container init 5c495012543afbefb6c8ac026cbada1dd0dc5c380bdd599a02bc05bc659a39ba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_REF=squid, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS) 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 podman[49499]: 2026-03-10 11:00:15.226783382 +0000 UTC m=+0.051664741 container start 5c495012543afbefb6c8ac026cbada1dd0dc5c380bdd599a02bc05bc659a39ba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06, CEPH_REF=squid, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 bash[49499]: 5c495012543afbefb6c8ac026cbada1dd0dc5c380bdd599a02bc05bc659a39ba 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 podman[49499]: 2026-03-10 11:00:15.184957128 +0000 UTC m=+0.009838487 image pull 
654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 systemd[1]: Started Ceph mon.vm06 for 2d4d1532-1c70-11f1-9ee5-8d2ac270c240. 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 6 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: pidfile_write: ignore empty --pid-file 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: load: jerasure load: lrc 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: RocksDB version: 7.9.2 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Git sha 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: DB SUMMARY 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: DB Session ID: OB5B3RBW7YZLYCUOOSKK 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: CURRENT file: CURRENT 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm06/store.db dir, Total Num: 1, files: 000008.sst 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm06/store.db: 000009.log size: 75099 ; 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.error_if_exists: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.create_if_missing: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.paranoid_checks: 1 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.env: 0x55d4f57b3dc0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 
ceph-mon[49534]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.info_log: 0x55d4f66f0b20 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_file_opening_threads: 16 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.statistics: (nil) 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.use_fsync: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_log_file_size: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.allow_fallocate: 1 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.use_direct_reads: 0 2026-03-10T11:00:15.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.db_log_dir: 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.wal_dir: 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.advise_random_on_open: 1 
2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.write_buffer_manager: 0x55d4f66f5900 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.unordered_write: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.row_cache: None 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.wal_filter: None 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.two_write_queues: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.wal_compression: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.atomic_flush: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 
2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.log_readahead_size: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_background_jobs: 2 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_background_compactions: -1 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_subcompactions: 1 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_open_files: -1 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 
ceph-mon[49534]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_background_flushes: -1 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Compression algorithms supported: 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: kZSTD supported: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: kXpressCompression supported: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: kBZip2Compression supported: 0 2026-03-10T11:00:15.295 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: kLZ4Compression supported: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: kZlibCompression supported: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: kSnappyCompression supported: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm06/store.db/MANIFEST-000010 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.merge_operator: 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_filter: None 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.table_factory: 
BlockBasedTable 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55d4f66f06e0) 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: cache_index_and_filter_blocks: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: pin_top_level_index_and_filter: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: index_type: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: data_block_index_type: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: index_shortening: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: checksum: 4 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: no_block_cache: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_cache: 0x55d4f6715350 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_cache_name: BinnedLRUCache 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_cache_options: 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: capacity : 536870912 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: num_shard_bits : 4 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: strict_capacity_limit : 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: high_pri_pool_ratio: 0.000 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_cache_compressed: (nil) 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: persistent_cache: (nil) 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_size: 4096 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_size_deviation: 10 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_restart_interval: 16 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: index_block_restart_interval: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: metadata_block_size: 4096 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: partition_filters: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: use_delta_encoding: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: filter_policy: bloomfilter 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: whole_key_filtering: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: verify_compression: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: read_amp_bytes_per_bit: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: format_version: 5 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: enable_index_compression: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_align: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: max_auto_readahead_size: 262144 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: prepopulate_block_cache: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: 
initial_auto_readahead_size: 8192 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression: NoCompression 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression: Disabled 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.num_levels: 7 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T11:00:15.296 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T11:00:15.297 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 
10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_compaction_bytes: 1677721600
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.arena_block_size: 1048576
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.disable_auto_compactions: 0
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.inplace_update_support: 0
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.inplace_update_num_locks: 10000
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.memtable_whole_key_filtering: 0
2026-03-10T11:00:15.297 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.memtable_huge_page_size: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.bloom_locality: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.max_successive_merges: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.optimize_filters_for_hits: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.paranoid_file_checks: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.force_consistency_checks: 1
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.report_bg_io_stats: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.ttl: 2592000
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.periodic_compaction_seconds: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.preclude_last_level_data_seconds: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.preserve_internal_time_seconds: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.enable_blob_files: false
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.min_blob_size: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.blob_file_size: 268435456
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.blob_compression_type: NoCompression
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.enable_blob_garbage_collection: false
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.blob_compaction_readahead_size: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.blob_file_starting_level: 0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm06/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 33e4f36f-f794-4bbb-a3b7-de71f1518eb4
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773140415248235, "job": 1, "event": "recovery_started", "wal_files": [9]}
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773140415254947, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 72167, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 223, "table_properties": {"data_size": 70446, "index_size": 174, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 517, "raw_key_size": 9562, "raw_average_key_size": 49, "raw_value_size": 65071, "raw_average_value_size": 335, "num_data_blocks": 8, "num_entries": 194, "num_filter_entries": 194, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773140415, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "33e4f36f-f794-4bbb-a3b7-de71f1518eb4", "db_session_id": "OB5B3RBW7YZLYCUOOSKK", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}}
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773140415255008, "job": 1, "event": "recovery_finished"}
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/version_set.cc:5047] Creating manifest 15
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm06/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55d4f6716e00
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: DB pointer 0x55d4f6830000
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ** DB Stats **
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ** Compaction Stats [default] **
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: L0 2/0 72.35 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 10.6 0.01 0.00 1 0.006 0 0 0.0 0.0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Sum 2/0 72.35 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 10.6 0.01 0.00 1 0.006 0 0 0.0 0.0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 10.6 0.01 0.00 1 0.006 0 0 0.0 0.0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ** Compaction Stats [default] **
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 10.6 0.01 0.00 1 0.006 0 0 0.0 0.0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-03-10T11:00:15.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Cumulative compaction: 0.00 GB write, 6.13 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Interval compaction: 0.00 GB write, 6.13 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Block cache BinnedLRUCache@0x55d4f6715350#6 capacity: 512.00 MB usage: 1.06 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 9e-06 secs_since: 0
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,0.70 KB,0.00013411%) IndexBlock(2,0.36 KB,6.85453e-05%) Misc(1,0.00 KB,0%)
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-10T11:00:15.299 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: mon.vm06 is new leader, mons vm06 in quorum (ranks 0)
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: monmap epoch 1
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: last_changed 2026-03-10T11:00:12.539852+0000
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: created 2026-03-10T11:00:12.539852+0000
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: min_mon_release 19 (squid)
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: election_strategy: 1
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: fsmap
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: osdmap e1: 0 total, 0 up, 0 in
2026-03-10T11:00:15.555 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:15 vm06 ceph-mon[49534]: mgrmap e1: no daemons active
2026-03-10T11:00:15.565 INFO:teuthology.orchestra.run.vm06.stdout:Wrote config to /etc/ceph/ceph.conf
2026-03-10T11:00:15.565 INFO:teuthology.orchestra.run.vm06.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-03-10T11:00:15.565 INFO:teuthology.orchestra.run.vm06.stdout:Creating mgr...
2026-03-10T11:00:15.567 INFO:teuthology.orchestra.run.vm06.stdout:Verifying port 0.0.0.0:9283 ...
2026-03-10T11:00:15.567 INFO:teuthology.orchestra.run.vm06.stdout:Verifying port 0.0.0.0:8765 ...
2026-03-10T11:00:15.567 INFO:teuthology.orchestra.run.vm06.stdout:Verifying port 0.0.0.0:8443 ...
2026-03-10T11:00:15.714 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mgr.vm06.luxohm
2026-03-10T11:00:15.715 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to reset failed state of unit ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mgr.vm06.luxohm.service: Unit ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mgr.vm06.luxohm.service not loaded.
2026-03-10T11:00:15.832 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240.target.wants/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mgr.vm06.luxohm.service → /etc/systemd/system/ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@.service.
2026-03-10T11:00:16.010 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present
2026-03-10T11:00:16.010 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to enable service . firewalld.service is not available
2026-03-10T11:00:16.010 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present
2026-03-10T11:00:16.010 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available
2026-03-10T11:00:16.010 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr to start...
2026-03-10T11:00:16.010 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr...
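At this point bootstrap has a quorate mon and a freshly created mgr unit, and it polls `ceph status` until the mgrmap reports an available active mgr, as the repeated JSON dumps below show. A minimal shell sketch of an equivalent wait loop, assuming the admin keyring written above and `jq` on the host (cephadm performs this check internally, in Python):

    # poll until the mgrmap reports an available active mgr (up to 15 tries)
    for i in $(seq 1 15); do
        ceph status --format json | jq -e '.mgrmap.available' >/dev/null && break
        echo "mgr not available, waiting ($i/15)..."
        sleep 2
    done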
2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "2d4d1532-1c70-11f1-9ee5-8d2ac270c240", 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-03-10T11:00:16.348 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 
"num_objects": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-03-10T11:00:13:774701+0000", 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "restful" 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-03-10T11:00:13.775968+0000", 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-03-10T11:00:16.349 INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (1/15)... 2026-03-10T11:00:16.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:16 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1229509155' entity='client.admin' 2026-03-10T11:00:16.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:16 vm06 ceph-mon[49534]: from='client.? 
192.168.123.106:0/381692841' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "2d4d1532-1c70-11f1-9ee5-8d2ac270c240", 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 3, 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-10T11:00:18.665 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-10T11:00:18.666 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-10T11:00:18.666 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-10T11:00:18.666 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-10T11:00:18.666 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-10T11:00:18.666 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:18.666 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-10T11:00:18.666 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-10T11:00:18.666 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: 
stdout "num_pools": 0, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-03-10T11:00:13:774701+0000", 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "restful" 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-03-10T11:00:13.775968+0000", 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-03-10T11:00:18.667 INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (2/15)... 2026-03-10T11:00:18.737 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:18 vm06 ceph-mon[49534]: from='client.? 
192.168.123.106:0/2690923137' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T11:00:20.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: Activating manager daemon vm06.luxohm 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: mgrmap e2: vm06.luxohm(active, starting, since 0.00415659s) 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr metadata", "who": "vm06.luxohm", "id": "vm06.luxohm"}]: dispatch 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: Manager daemon vm06.luxohm is now available 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.luxohm/mirror_snapshot_schedule"}]: dispatch 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.luxohm/trash_purge_schedule"}]: dispatch 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' 2026-03-10T11:00:20.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:19 vm06 ceph-mon[49534]: from='mgr.14100 192.168.123.106:0/1695568775' entity='mgr.vm06.luxohm' 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "2d4d1532-1c70-11f1-9ee5-8d2ac270c240", 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 
2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 5, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:21.087 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-10T11:00:21.088 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-10T11:00:21.088 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-10T11:00:21.088 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-10T11:00:21.088 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-10T11:00:21.088 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-03-10T11:00:13:774701+0000",
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "restful"
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ],
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-03-10T11:00:13.775968+0000",
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }
2026-03-10T11:00:21.089 INFO:teuthology.orchestra.run.vm06.stdout:mgr is available
2026-03-10T11:00:21.204 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:21 vm06 ceph-mon[49534]: mgrmap e3: vm06.luxohm(active, since 1.00779s)
2026-03-10T11:00:21.204 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:21 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1386978433' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [global]
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout fsid = 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.106:3300,v1:192.168.123.106:6789]
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [mgr]
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-03-10T11:00:21.461 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [osd]
2026-03-10T11:00:21.462 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-03-10T11:00:21.462 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-03-10T11:00:21.462 INFO:teuthology.orchestra.run.vm06.stdout:Enabling cephadm module...
2026-03-10T11:00:22.286 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:22 vm06 ceph-mon[49534]: mgrmap e4: vm06.luxohm(active, since 2s)
2026-03-10T11:00:22.286 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:22 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2098575607' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
2026-03-10T11:00:22.286 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:22 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/706360834' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch
2026-03-10T11:00:22.601 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {
2026-03-10T11:00:22.601 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 5,
2026-03-10T11:00:22.601 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-10T11:00:22.601 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "active_name": "vm06.luxohm",
2026-03-10T11:00:22.601 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-03-10T11:00:22.601 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }
2026-03-10T11:00:22.601 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for the mgr to restart...
2026-03-10T11:00:22.601 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr epoch 5...
2026-03-10T11:00:23.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:23 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/706360834' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
2026-03-10T11:00:23.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:23 vm06 ceph-mon[49534]: mgrmap e5: vm06.luxohm(active, since 3s)
2026-03-10T11:00:23.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:23 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3674246401' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-10T11:00:26.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: Active manager daemon vm06.luxohm restarted
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: Activating manager daemon vm06.luxohm
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: osdmap e2: 0 total, 0 up, 0 in
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: mgrmap e6: vm06.luxohm(active, starting, since 0.799067s)
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr metadata", "who": "vm06.luxohm", "id": "vm06.luxohm"}]: dispatch
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: Manager daemon vm06.luxohm is now available
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:00:26.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:25 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.luxohm/mirror_snapshot_schedule"}]: dispatch
2026-03-10T11:00:27.012 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {
2026-03-10T11:00:27.012 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 6,
2026-03-10T11:00:27.012 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "initialized": true
2026-03-10T11:00:27.012 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }
2026-03-10T11:00:27.012 INFO:teuthology.orchestra.run.vm06.stdout:mgr epoch 5 is available
2026-03-10T11:00:27.012 INFO:teuthology.orchestra.run.vm06.stdout:Setting orchestrator backend to cephadm...
2026-03-10T11:00:27.129 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:26 vm06 ceph-mon[49534]: Found migration_current of "None". Setting to last migration.
2026-03-10T11:00:27.129 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:26 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.luxohm/trash_purge_schedule"}]: dispatch
2026-03-10T11:00:27.129 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:26 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:27.129 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:26 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:27.129 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:26 vm06 ceph-mon[49534]: mgrmap e7: vm06.luxohm(active, since 1.82438s)
2026-03-10T11:00:27.803 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout value unchanged
2026-03-10T11:00:27.803 INFO:teuthology.orchestra.run.vm06.stdout:Generating ssh key...
2026-03-10T11:00:28.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:27 vm06 ceph-mon[49534]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-10T11:00:28.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:27 vm06 ceph-mon[49534]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-10T11:00:28.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:27 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:26] ENGINE Bus STARTING
2026-03-10T11:00:28.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:27 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:27] ENGINE Serving on http://192.168.123.106:8765
2026-03-10T11:00:28.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:27 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:00:28.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:27 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:28.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:27 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:00:28.549 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGgQ4bY/9kNyy3jwpBTOy2diWaTfzUpRJ6RAvDRjGWtK ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:28.549 INFO:teuthology.orchestra.run.vm06.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub
2026-03-10T11:00:28.549 INFO:teuthology.orchestra.run.vm06.stdout:Adding key to root@localhost authorized_keys...
2026-03-10T11:00:28.549 INFO:teuthology.orchestra.run.vm06.stdout:Adding host vm06...
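The block above covers four bootstrap stages in sequence: the assimilated minimal config is echoed back, the cephadm mgr module is enabled (which respawns the active mgr, hence the wait for a new mgr epoch), the orchestrator backend is pointed at cephadm, and an ed25519 SSH identity is generated and installed for root. The same stages can be driven by hand with the commands the mon log records being dispatched; a sketch using this run's paths (the `ssh-copy-id` step is an assumption standing in for teuthology's direct authorized_keys edit):

    ceph config assimilate-conf -i /etc/ceph/ceph.conf
    ceph mgr module enable cephadm        # the active mgr restarts after this
    ceph orch set backend cephadm
    ceph cephadm set-user root
    ceph cephadm generate-key
    ceph cephadm get-pub-key > /home/ubuntu/cephtest/ceph.pub
    ssh-copy-id -f -i /home/ubuntu/cephtest/ceph.pub root@vm06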
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:27] ENGINE Serving on https://192.168.123.106:7150
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:27] ENGINE Bus STARTED
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:27] ENGINE Client ('192.168.123.106', 48160) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: from='client.14130 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: mgrmap e8: vm06.luxohm(active, since 2s)
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: Generating ssh key...
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:29.085 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:28 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:30.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:29 vm06 ceph-mon[49534]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:30.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:29 vm06 ceph-mon[49534]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm06", "addr": "192.168.123.106", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:30.470 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Added host 'vm06' with addr '192.168.123.106'
2026-03-10T11:00:30.470 INFO:teuthology.orchestra.run.vm06.stdout:Deploying mon service with default placement...
2026-03-10T11:00:30.869 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled mon update...
2026-03-10T11:00:30.869 INFO:teuthology.orchestra.run.vm06.stdout:Deploying mgr service with default placement...
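With the SSH key trusted, the host is registered and the first service specs are applied; note that "Scheduled mon update..." only means the spec was persisted, since actual daemon placement happens asynchronously in the cephadm serve loop. The equivalent CLI, mirroring the dispatches logged above:

    ceph orch host add vm06 192.168.123.106
    ceph orch apply mon     # spec saved with placement count:5 (see the mon log below)
    ceph orch apply mgr     # spec saved with placement count:2
    ceph orch host ls       # confirm the host was registered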
2026-03-10T11:00:31.117 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:30 vm06 ceph-mon[49534]: Deploying cephadm binary to vm06
2026-03-10T11:00:31.117 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:30 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:31.117 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:30 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:00:31.117 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:30 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:31.260 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled mgr update...
2026-03-10T11:00:31.260 INFO:teuthology.orchestra.run.vm06.stdout:Deploying crash service with default placement...
2026-03-10T11:00:31.625 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled crash update...
2026-03-10T11:00:31.625 INFO:teuthology.orchestra.run.vm06.stdout:Deploying ceph-exporter service with default placement...
2026-03-10T11:00:32.118 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update...
2026-03-10T11:00:32.119 INFO:teuthology.orchestra.run.vm06.stdout:Deploying prometheus service with default placement...
2026-03-10T11:00:32.144 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:31 vm06 ceph-mon[49534]: Added host vm06
2026-03-10T11:00:32.144 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:31 vm06 ceph-mon[49534]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:32.144 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:31 vm06 ceph-mon[49534]: Saving service mon spec with placement count:5
2026-03-10T11:00:32.144 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:31 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:32.144 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:31 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:32.535 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled prometheus update...
2026-03-10T11:00:32.535 INFO:teuthology.orchestra.run.vm06.stdout:Deploying grafana service with default placement...
2026-03-10T11:00:32.936 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled grafana update...
2026-03-10T11:00:32.936 INFO:teuthology.orchestra.run.vm06.stdout:Deploying node-exporter service with default placement...
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: Saving service mgr spec with placement count:2
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: Saving service crash spec with placement *
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: Saving service ceph-exporter spec with placement *
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:33.208 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:32 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:33.347 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update...
2026-03-10T11:00:33.348 INFO:teuthology.orchestra.run.vm06.stdout:Deploying alertmanager service with default placement...
2026-03-10T11:00:33.772 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update...
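The remaining default services follow the same apply pattern. A compact sketch that schedules them in one pass and then lists the saved specs (the service set matches the deployments above; placements are count:1, count:2 or '*' as the mon log records):

    for svc in crash ceph-exporter prometheus grafana node-exporter alertmanager; do
        ceph orch apply "$svc"
    done
    ceph orch ls    # one line per spec with its placement and daemon counts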
2026-03-10T11:00:33.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:33 vm06 ceph-mon[49534]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:33.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:33 vm06 ceph-mon[49534]: Saving service prometheus spec with placement count:1
2026-03-10T11:00:33.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:33 vm06 ceph-mon[49534]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:33.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:33 vm06 ceph-mon[49534]: Saving service grafana spec with placement count:1
2026-03-10T11:00:33.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:33 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:33.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:33 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:33.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:33 vm06 ceph-mon[49534]: from='mgr.14118 192.168.123.106:0/1128871309' entity='mgr.vm06.luxohm'
2026-03-10T11:00:34.550 INFO:teuthology.orchestra.run.vm06.stdout:Enabling the dashboard module...
2026-03-10T11:00:35.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:35 vm06 ceph-mon[49534]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:35.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:35 vm06 ceph-mon[49534]: Saving service node-exporter spec with placement *
2026-03-10T11:00:35.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:35 vm06 ceph-mon[49534]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:35.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:35 vm06 ceph-mon[49534]: Saving service alertmanager spec with placement count:1
2026-03-10T11:00:35.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:35 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1910136209' entity='client.admin'
2026-03-10T11:00:35.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:35 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2157952450' entity='client.admin'
2026-03-10T11:00:35.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:35 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2052060625' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch
2026-03-10T11:00:36.239 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {
2026-03-10T11:00:36.239 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 9,
2026-03-10T11:00:36.239 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-10T11:00:36.239 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "active_name": "vm06.luxohm",
2026-03-10T11:00:36.239 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-03-10T11:00:36.239 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }
2026-03-10T11:00:36.239 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for the mgr to restart...
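Enabling the dashboard module respawns the active mgr just as enabling cephadm did, so the bootstrap records the pre-enable mgrmap epoch (9 above) and waits for a later epoch to report in. A rough sketch of that epoch wait, assuming `jq` is installed and that `ceph mgr stat` keeps the JSON shape shown above:

    epoch=$(ceph mgr stat | jq .epoch)      # pre-enable mgrmap epoch
    ceph mgr module enable dashboard
    until ceph mgr stat | jq -e ".epoch > $epoch and .available" >/dev/null; do
        sleep 2                             # wait for a later, active mgr epoch
    done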
2026-03-10T11:00:36.239 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr epoch 9...
2026-03-10T11:00:36.592 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:36 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2052060625' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-03-10T11:00:36.592 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:36 vm06 ceph-mon[49534]: mgrmap e9: vm06.luxohm(active, since 10s)
2026-03-10T11:00:36.592 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:36 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2044033860' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-10T11:00:39.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: Active manager daemon vm06.luxohm restarted
2026-03-10T11:00:39.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: Activating manager daemon vm06.luxohm
2026-03-10T11:00:39.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: osdmap e3: 0 total, 0 up, 0 in
2026-03-10T11:00:39.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: mgrmap e10: vm06.luxohm(active, starting, since 0.00554969s)
2026-03-10T11:00:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-10T11:00:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr metadata", "who": "vm06.luxohm", "id": "vm06.luxohm"}]: dispatch
2026-03-10T11:00:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T11:00:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T11:00:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T11:00:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: Manager daemon vm06.luxohm is now available
2026-03-10T11:00:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:00:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.luxohm/mirror_snapshot_schedule"}]: dispatch
2026-03-10T11:00:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:38 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.luxohm/trash_purge_schedule"}]: dispatch
2026-03-10T11:00:39.884 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {
2026-03-10T11:00:39.884 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11,
2026-03-10T11:00:39.884 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "initialized": true
2026-03-10T11:00:39.884 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }
2026-03-10T11:00:39.884 INFO:teuthology.orchestra.run.vm06.stdout:mgr epoch 9 is available
2026-03-10T11:00:39.884 INFO:teuthology.orchestra.run.vm06.stdout:Generating a dashboard self-signed certificate...
2026-03-10T11:00:40.316 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:40 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:40.316 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:40 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:39] ENGINE Bus STARTING
2026-03-10T11:00:40.316 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:40 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:39] ENGINE Serving on https://192.168.123.106:7150
2026-03-10T11:00:40.316 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:40 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:39] ENGINE Client ('192.168.123.106', 33680) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T11:00:40.347 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Self-signed certificate created
2026-03-10T11:00:40.347 INFO:teuthology.orchestra.run.vm06.stdout:Creating initial admin user...
2026-03-10T11:00:40.737 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:40 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:40.737 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:40 vm06 ceph-mon[49534]: mgrmap e11: vm06.luxohm(active, since 1.00973s)
2026-03-10T11:00:40.737 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:40 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:40.737 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:40 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:40.869 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$Gbywf.JDoDeovFML1pdhuOjJzqZDv1cp8Y7nJC9pW.8BD2HtrGtBS", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773140440, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-03-10T11:00:40.869 INFO:teuthology.orchestra.run.vm06.stdout:Fetching dashboard port number...
2026-03-10T11:00:41.255 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 8443
2026-03-10T11:00:41.256 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present
2026-03-10T11:00:41.256 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to open ports <[8443]>. firewalld.service is not available
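The initial dashboard account above is created with force_password and pwd_update_required set, and the returned record carries only a bcrypt hash, never the plaintext. A small sanity check over a record of that shape (hash shortened here; the check itself is illustrative):

    import json

    # Shape of the record returned above; the bcrypt hash is shortened.
    record = json.loads("""
    {"username": "admin", "password": "$2b$12$...",
     "roles": ["administrator"], "name": null, "email": null,
     "lastUpdate": 1773140440, "enabled": true,
     "pwdExpirationDate": null, "pwdUpdateRequired": true}
    """)

    def check_initial_admin(rec):
        # Bootstrap-created admin: enabled, administrator role,
        # and a forced password change on first login.
        assert rec["enabled"]
        assert "administrator" in rec["roles"]
        assert rec["pwdUpdateRequired"]
        assert rec["password"].startswith("$2b$")  # bcrypt, never plaintext

    check_initial_admin(record)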
2026-03-10T11:00:41.256 INFO:teuthology.orchestra.run.vm06.stdout:Ceph Dashboard is now available at:
2026-03-10T11:00:41.256 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.256 INFO:teuthology.orchestra.run.vm06.stdout: URL: https://vm06.local:8443/
2026-03-10T11:00:41.256 INFO:teuthology.orchestra.run.vm06.stdout: User: admin
2026-03-10T11:00:41.256 INFO:teuthology.orchestra.run.vm06.stdout: Password: lkusd7phjo
2026-03-10T11:00:41.257 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.257 INFO:teuthology.orchestra.run.vm06.stdout:Saving cluster configuration to /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config directory
2026-03-10T11:00:41.374 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:41 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:39] ENGINE Serving on http://192.168.123.106:8765
2026-03-10T11:00:41.374 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:41 vm06 ceph-mon[49534]: [10/Mar/2026:11:00:39] ENGINE Bus STARTED
2026-03-10T11:00:41.374 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:41 vm06 ceph-mon[49534]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-10T11:00:41.374 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:41 vm06 ceph-mon[49534]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-10T11:00:41.374 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:41 vm06 ceph-mon[49534]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:41.374 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:41 vm06 ceph-mon[49534]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:41.374 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:41 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:41.374 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:41 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1052268540' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:Or, if you are only running a single cluster on this host:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout: sudo /home/ubuntu/cephtest/cephadm shell
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:Please consider enabling telemetry to help improve Ceph:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout: ceph telemetry on
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:For more information see:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:41.670 INFO:teuthology.orchestra.run.vm06.stdout:Bootstrap complete.
2026-03-10T11:00:41.704 INFO:tasks.cephadm:Fetching config...
2026-03-10T11:00:41.704 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-10T11:00:41.704 DEBUG:teuthology.orchestra.run.vm06:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-03-10T11:00:41.728 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-03-10T11:00:41.729 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-10T11:00:41.729 DEBUG:teuthology.orchestra.run.vm06:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-03-10T11:00:41.794 INFO:tasks.cephadm:Fetching mon keyring...
2026-03-10T11:00:41.794 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-10T11:00:41.794 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/keyring of=/dev/stdout
2026-03-10T11:00:41.871 INFO:tasks.cephadm:Fetching pub ssh key...
2026-03-10T11:00:41.872 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-10T11:00:41.872 DEBUG:teuthology.orchestra.run.vm06:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-03-10T11:00:41.936 INFO:tasks.cephadm:Installing pub ssh key for root users...
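After "Bootstrap complete." the task copies the bootstrap artifacts (ceph.conf, client.admin keyring, mon keyring, cluster ssh pubkey) off the host by wrapping `dd if=<path> of=/dev/stdout` in the remote shell, which also works for root-owned files once prefixed with sudo. The same fetch over plain ssh might look like this (hosts and paths taken from the log; the helper is illustrative):

    import subprocess

    def fetch_remote_file(host, path, sudo=False):
        # Mirrors the "dd if=<path> of=/dev/stdout" fetch in the log:
        # the file body arrives on stdout of the ssh channel.
        remote = ("sudo " if sudo else "") + "dd if=%s of=/dev/stdout" % path
        return subprocess.run(
            ["ssh", host, remote], check=True, capture_output=True,
        ).stdout

    conf = fetch_remote_file("vm06.local", "/etc/ceph/ceph.conf")
    mon_keyring = fetch_remote_file(
        "vm06.local",
        "/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/keyring",
        sudo=True,
    )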
2026-03-10T11:00:41.936 DEBUG:teuthology.orchestra.run.vm06:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGgQ4bY/9kNyy3jwpBTOy2diWaTfzUpRJ6RAvDRjGWtK ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-10T11:00:42.053 INFO:teuthology.orchestra.run.vm06.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGgQ4bY/9kNyy3jwpBTOy2diWaTfzUpRJ6RAvDRjGWtK ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:42.066 DEBUG:teuthology.orchestra.run.vm07:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGgQ4bY/9kNyy3jwpBTOy2diWaTfzUpRJ6RAvDRjGWtK ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-10T11:00:42.103 INFO:teuthology.orchestra.run.vm07.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGgQ4bY/9kNyy3jwpBTOy2diWaTfzUpRJ6RAvDRjGWtK ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:00:42.114 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph config set mgr mgr/cephadm/allow_ptrace true
2026-03-10T11:00:42.303 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:00:42.400 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:42 vm06 ceph-mon[49534]: mgrmap e12: vm06.luxohm(active, since 2s)
2026-03-10T11:00:42.400 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:42 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1516191891' entity='client.admin'
2026-03-10T11:00:43.154 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755
2026-03-10T11:00:43.154 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch client-keyring set client.admin '*' --mode 0755
2026-03-10T11:00:43.432 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:00:43.884 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm07
2026-03-10T11:00:43.884 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T11:00:43.884 DEBUG:teuthology.orchestra.run.vm07:> dd of=/etc/ceph/ceph.conf
2026-03-10T11:00:43.900 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T11:00:43.900 DEBUG:teuthology.orchestra.run.vm07:> dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T11:00:43.956 INFO:tasks.cephadm:Adding host vm07 to orchestrator...
2026-03-10T11:00:43.956 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch host add vm07
2026-03-10T11:00:43.987 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2119003358' entity='client.admin'
2026-03-10T11:00:43.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:43.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:43.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-10T11:00:43.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:43.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-10T11:00:43.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-03-10T11:00:43.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:00:43.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: Deploying daemon ceph-exporter.vm06 on vm06
2026-03-10T11:00:43.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:43 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:44.245 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:00:45.064 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:44 vm06 ceph-mon[49534]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:45.064 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:44 vm06 ceph-mon[49534]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:45.991 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:45 vm06 ceph-mon[49534]: mgrmap e13: vm06.luxohm(active, since 6s)
2026-03-10T11:00:45.991 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:45 vm06 ceph-mon[49534]: Deploying cephadm binary to vm07
2026-03-10T11:00:45.991 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:45 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:45.991 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:45 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
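From here on almost every orchestrator call goes through the same wrapper: `sudo cephadm --image <ci image> shell -c <conf> -k <keyring> --fsid <fsid> -- ceph ...`, which runs the CLI inside the exact container image under test. A helper in the same spirit, with the image, fsid and paths from this run (the function itself is illustrative):

    import subprocess

    IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"
    FSID = "2d4d1532-1c70-11f1-9ee5-8d2ac270c240"

    def cephadm_shell(*ceph_args):
        # Mirrors the DEBUG command lines above: run `ceph ...` inside
        # the pinned container image, with the admin conf/keyring.
        cmd = [
            "sudo", "/home/ubuntu/cephtest/cephadm",
            "--image", IMAGE, "shell",
            "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", FSID,
            "--", "ceph", *ceph_args,
        ]
        return subprocess.run(
            cmd, check=True, capture_output=True, text=True,
        ).stdout

    # e.g. the host add issued above:
    # cephadm_shell("orch", "host", "add", "vm07")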
2026-03-10T11:00:45.991 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:45 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T11:00:45.991 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:45 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-03-10T11:00:45.991 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:45 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:00:45.991 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:45 vm06 ceph-mon[49534]: Deploying daemon crash.vm06 on vm06
2026-03-10T11:00:46.136 INFO:teuthology.orchestra.run.vm06.stdout:Added host 'vm07' with addr '192.168.123.107'
2026-03-10T11:00:46.300 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch host ls --format=json
2026-03-10T11:00:46.646 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:00:46.904 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:00:46.904 INFO:teuthology.orchestra.run.vm06.stdout:[{"addr": "192.168.123.106", "hostname": "vm06", "labels": [], "status": ""}, {"addr": "192.168.123.107", "hostname": "vm07", "labels": [], "status": ""}]
2026-03-10T11:00:47.079 INFO:tasks.cephadm:Setting crush tunables to default
2026-03-10T11:00:47.079 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd crush tunables default
2026-03-10T11:00:47.250 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:00:47.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:47 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:47.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:47 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:47.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:47 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:47.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:47 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:47.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:47 vm06 ceph-mon[49534]: Deploying daemon node-exporter.vm06 on vm06
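Adding vm07 is then verified with `ceph orch host ls --format=json`, which returns one flat record per host, as printed above. A small check against that output, assuming a plain `ceph` CLI on the path (the log reaches it through the cephadm shell wrapper instead):

    import json
    import subprocess

    def orch_hosts():
        out = subprocess.run(
            ["ceph", "orch", "host", "ls", "--format=json"],
            check=True, capture_output=True, text=True,
        ).stdout
        # One record per host:
        # {"addr": ..., "hostname": ..., "labels": [...], "status": ""}
        return {h["hostname"]: h["addr"] for h in json.loads(out)}

    hosts = orch_hosts()
    assert hosts.get("vm07") == "192.168.123.107", hosts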
2026-03-10T11:00:47.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:47 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:47.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:47 vm06 ceph-mon[49534]: Added host vm07
2026-03-10T11:00:48.087 INFO:teuthology.orchestra.run.vm06.stderr:adjusted tunables profile to default
2026-03-10T11:00:48.228 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:48 vm06 ceph-mon[49534]: from='client.14189 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:00:48.229 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:48 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2719589936' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch
2026-03-10T11:00:48.256 INFO:tasks.cephadm:Adding mon.vm06 on vm06
2026-03-10T11:00:48.256 INFO:tasks.cephadm:Adding mon.vm07 on vm07
2026-03-10T11:00:48.256 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch apply mon '2;vm06:192.168.123.106=vm06;vm07:192.168.123.107=vm07'
2026-03-10T11:00:48.428 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:48.464 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:48.719 INFO:teuthology.orchestra.run.vm07.stdout:Scheduled mon update...
2026-03-10T11:00:48.867 DEBUG:teuthology.orchestra.run.vm07:mon.vm07> sudo journalctl -f -n 0 -u ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm07.service
2026-03-10T11:00:48.869 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:00:48.869 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:00:49.065 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:49.095 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:49.336 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:00:49.336 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:00:49.337 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:00:49.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:49 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2719589936' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-03-10T11:00:49.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:49 vm06 ceph-mon[49534]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T11:00:49.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:49 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:49.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:49 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:49.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:49 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:49.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:49 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:49.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:49 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:49.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:49 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:50.482 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:00:50.482 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:00:50.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:50 vm06 ceph-mon[49534]: from='client.14193 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm06:192.168.123.106=vm06;vm07:192.168.123.107=vm07", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:00:50.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:50 vm06 ceph-mon[49534]: Saving service mon spec with placement vm06:192.168.123.106=vm06;vm07:192.168.123.107=vm07;count:2
2026-03-10T11:00:50.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:50 vm06 ceph-mon[49534]: Deploying daemon alertmanager.vm06 on vm06
2026-03-10T11:00:50.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:50 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/1069466' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:00:50.641 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:50.677 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:50.946 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:00:50.946 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:00:50.946 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:00:51.343 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:51 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/2864870932' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:00:52.114 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:00:52.115 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:00:52.280 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:52.319 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:52.580 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:00:52.580 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:00:52.580 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:00:53.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: Regenerating cephadm self-signed grafana TLS certificates
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: Deploying daemon grafana.vm06 on vm06
2026-03-10T11:00:53.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:53 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/898050104' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:00:53.746 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
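The mon service above is pinned with an explicit placement of the form '<count>;<host>:<addr>=<name>;...', so each mon lands on a named host with a fixed IP instead of floating with the scheduler. A sketch that builds such a spec from a host table; the format is inferred from the single command in this run, so treat it as an assumption:

    def mon_placement(hosts):
        # hosts: {"vm06": "192.168.123.106", "vm07": "192.168.123.107"}
        # Yields "2;vm06:192.168.123.106=vm06;vm07:192.168.123.107=vm07",
        # matching the spec passed to `ceph orch apply mon` above.
        parts = ["%s:%s=%s" % (h, addr, h) for h, addr in hosts.items()]
        return ";".join([str(len(hosts))] + parts)

    spec = mon_placement({"vm06": "192.168.123.106",
                          "vm07": "192.168.123.107"})
    # passed as: ceph orch apply mon '<spec>'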
2026-03-10T11:00:53.747 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:00:53.898 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:53.932 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:54.187 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:00:54.187 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:00:54.187 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:00:55.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:54 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:00:55.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:54 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/2986911066' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:00:55.357 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:00:55.357 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:00:55.519 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:55.556 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:55.801 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:00:55.801 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:00:55.801 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:00:56.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:55 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/1182923788' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:00:56.967 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:00:56.967 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:00:57.135 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:57.174 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:57.450 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:00:57.450 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:00:57.450 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:00:58.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:57 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/3294644557' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:00:58.598 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:00:58.599 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:00:58.759 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:58.795 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:00:59.056 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:00:59.057 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:00:59.057 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:00.223 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:00.223 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:00.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/1019115440' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:00:59 vm06 ceph-mon[49534]: Deploying daemon prometheus.vm06 on vm06
2026-03-10T11:01:00.388 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:00.424 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:00.694 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:00.694 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:00.694 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:01.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:00 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/249833955' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:01.861 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:01.861 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:02.020 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:02.055 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:02.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:01 vm06 ceph-mon[49534]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:02.310 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:02.311 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:02.311 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:03.484 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:03.484 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:03.641 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:03.675 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:03.932 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:03.932 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:03.932 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:04.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:03 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/2694637821' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:04.700 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:04 vm06 ceph-mon[49534]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:04.700 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:04 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:04.700 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:04 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/1926312342' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:05.101 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:05.101 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:05.250 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:05.281 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:05.522 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:05.522 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:05.522 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:06.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:06 vm06 ceph-mon[49534]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:06.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:06 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:06.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:06 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:06.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:06 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm'
2026-03-10T11:01:06.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:06 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch
2026-03-10T11:01:06.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:06 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/2398235047' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:06.684 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
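Each "Waiting for 2 mons in monmap..." iteration above shells out for `ceph mon dump -f json` and counts the mons array; the monmap is still epoch 1 with only vm06 in quorum, so the task keeps polling until vm07's mon joins. A condensed sketch of that wait (illustrative, not the teuthology task's actual code):

    import json
    import subprocess
    import time

    def mon_count():
        # stdout carries the monmap JSON; "dumped monmap epoch N"
        # goes to stderr, exactly as in the log above.
        out = subprocess.run(
            ["ceph", "mon", "dump", "-f", "json"],
            check=True, capture_output=True, text=True,
        ).stdout
        return len(json.loads(out)["mons"])

    def wait_for_mons(want, timeout=300, interval=1.5):
        deadline = time.time() + timeout
        while time.time() < deadline:
            if mon_count() >= want:
                return
            time.sleep(interval)
        raise TimeoutError("monmap never reached %d mons" % want)

    if __name__ == "__main__":
        wait_for_mons(2)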
2026-03-10T11:01:06.684 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:06.839 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:06.871 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:07.125 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:07.125 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:07.125 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:07.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:07 vm06 ceph-mon[49534]: from='mgr.14162 192.168.123.106:0/2299339046' entity='mgr.vm06.luxohm' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished
2026-03-10T11:01:07.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:07 vm06 ceph-mon[49534]: mgrmap e14: vm06.luxohm(active, since 27s)
2026-03-10T11:01:08.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:08 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/3924722135' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:08.271 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:08.271 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:08.428 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:08.462 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:08.720 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:08.720 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:08.720 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:09.255 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/669491698' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: Active manager daemon vm06.luxohm restarted
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: Activating manager daemon vm06.luxohm
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: osdmap e5: 0 total, 0 up, 0 in
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: mgrmap e15: vm06.luxohm(active, starting, since 0.00516489s)
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr metadata", "who": "vm06.luxohm", "id": "vm06.luxohm"}]: dispatch
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: Manager daemon vm06.luxohm is now available
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.luxohm/mirror_snapshot_schedule"}]: dispatch
2026-03-10T11:01:09.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.luxohm/trash_purge_schedule"}]: dispatch
2026-03-10T11:01:09.892 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:09.893 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:10.075 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:10.125 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:10.454 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:10.454 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:10.454 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:10.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:10 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:10.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:10 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:10.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:10 vm06 ceph-mon[49534]: mgrmap e16: vm06.luxohm(active, since 1.00849s)
2026-03-10T11:01:10.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:10 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:11.605 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:11.605 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:11.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: [10/Mar/2026:11:01:10] ENGINE Bus STARTING
2026-03-10T11:01:11.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: [10/Mar/2026:11:01:10] ENGINE Serving on http://192.168.123.106:8765
2026-03-10T11:01:11.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: [10/Mar/2026:11:01:10] ENGINE Serving on https://192.168.123.106:7150
2026-03-10T11:01:11.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: [10/Mar/2026:11:01:10] ENGINE Bus STARTED
2026-03-10T11:01:11.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: [10/Mar/2026:11:01:10] ENGINE Client ('192.168.123.106', 49414) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T11:01:11.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:11.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:11.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/2349564476' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:11.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:11.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:11.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:11 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-10T11:01:11.801 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T11:01:11.852 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:12.146 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:12.146 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:12.148 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config rm", "who": "osd/host:vm07", "name": "osd_memory_target"}]: dispatch
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: Updating vm06:/etc/ceph/ceph.conf
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: Updating vm07:/etc/ceph/ceph.conf
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: Updating vm06:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: Updating vm07:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: mgrmap e17: vm06.luxohm(active, since 2s)
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: Updating vm07:/etc/ceph/ceph.client.admin.keyring
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: Updating vm06:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.client.admin.keyring
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: Updating vm07:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.client.admin.keyring
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/538322892' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm07", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm07", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-03-10T11:01:13.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:12 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:13.323 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:13.323 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:13.529 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:13.802 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:13.802 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:13.803 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:14.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:13 vm06 ceph-mon[49534]: Deploying daemon ceph-exporter.vm07 on vm07
2026-03-10T11:01:14.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:13 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:14.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:13 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:14.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:13 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:14.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:13 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:14.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:13 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm07", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T11:01:14.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:13 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm07", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-03-10T11:01:14.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:13 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:15.001 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:15.001 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:15.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:14 vm06 ceph-mon[49534]: Deploying daemon crash.vm07 on vm07
2026-03-10T11:01:15.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:14 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/377827886' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:15.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:14 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:15.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:14 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:15.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:14 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:15.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:14 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:15.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:14 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:15.165 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:15.425 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:15.425 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:15.425 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:16.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:15 vm06 ceph-mon[49534]: Deploying daemon node-exporter.vm07 on vm07
2026-03-10T11:01:16.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:15 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/158753335' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:16.588 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:16.588 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:16.786 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:17.093 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:17.093 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:17.093 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:17.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:17 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/4011801074' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:18.246 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T11:01:18.247 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm07.rgmael", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm07.rgmael", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: Deploying daemon mgr.vm07.rgmael on vm07
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:18 vm06 ceph-mon[49534]: Deploying daemon mon.vm07 on vm07
2026-03-10T11:01:18.631 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:19.050 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:19.050 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:00:12.539852Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T11:01:19.050 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T11:01:19.669 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:19 vm07 ceph-mon[56438]: mon.vm07@-1(synchronizing).paxosservice(auth 1..8) refresh upgraded, format 0 -> 3
2026-03-10T11:01:20.232 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
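Right before `Deploying daemon mon.vm07 on vm07` above, the mgr dispatches `auth get` for `mon.` and `config generate-minimal-conf`, which is what a joining monitor needs to bootstrap. A sketch of fetching those two artifacts by hand (the staging path is hypothetical; cephadm itself places them under `/var/lib/ceph/<fsid>/mon.<host>/` on the target, as the later `Inferring config .../mon.vm07/config` line shows):

```python
import pathlib
import subprocess

# The two dispatches visible in the log, run as plain CLI commands.
keyring = subprocess.check_output(
    ["sudo", "ceph", "auth", "get", "mon."], text=True)
conf = subprocess.check_output(
    ["sudo", "ceph", "config", "generate-minimal-conf"], text=True)

# Hypothetical staging location, for illustration only.
stage = pathlib.Path("/tmp/mon.vm07")
stage.mkdir(parents=True, exist_ok=True)
(stage / "keyring").write_text(keyring)
(stage / "config").write_text(conf)
```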
2026-03-10T11:01:20.232 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mon dump -f json
2026-03-10T11:01:20.403 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm07/config
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: mon.vm06 calling monitor election
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: mon.vm07 calling monitor election
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.? 192.168.123.107:0/3901454389' entity='mgr.vm07.rgmael' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm07.rgmael/crt"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: mon.vm06 is new leader, mons vm06,vm07 in quorum (ranks 0,1)
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: monmap epoch 2
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: last_changed 2026-03-10T11:01:19.450963+0000
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: created 2026-03-10T11:00:12.539852+0000
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: min_mon_release 19 (squid)
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: election_strategy: 1
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: 1: [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0] mon.vm07
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: fsmap
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: osdmap e5: 0 total, 0 up, 0 in
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: mgrmap e17: vm06.luxohm(active, since 15s)
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: overall HEALTH_OK
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: Standby manager daemon vm07.rgmael started
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.? 192.168.123.107:0/3901454389' entity='mgr.vm07.rgmael' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.? 192.168.123.107:0/3901454389' entity='mgr.vm07.rgmael' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm07.rgmael/key"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.? 192.168.123.107:0/3901454389' entity='mgr.vm07.rgmael' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T11:01:24.850 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:24 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-10T11:01:25.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:25.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: mon.vm06 calling monitor election
2026-03-10T11:01:25.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: mon.vm07 calling monitor election
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.? 192.168.123.107:0/3901454389' entity='mgr.vm07.rgmael' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm07.rgmael/crt"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: mon.vm06 is new leader, mons vm06,vm07 in quorum (ranks 0,1)
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: monmap epoch 2
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: last_changed 2026-03-10T11:01:19.450963+0000
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: created 2026-03-10T11:00:12.539852+0000
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: min_mon_release 19 (squid)
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: election_strategy: 1
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: 1: [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0] mon.vm07
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: fsmap
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: osdmap e5: 0 total, 0 up, 0 in
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: mgrmap e17: vm06.luxohm(active, since 15s)
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: overall HEALTH_OK
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: Standby manager daemon vm07.rgmael started
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.? 192.168.123.107:0/3901454389' entity='mgr.vm07.rgmael' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.? 192.168.123.107:0/3901454389' entity='mgr.vm07.rgmael' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm07.rgmael/key"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.? 192.168.123.107:0/3901454389' entity='mgr.vm07.rgmael' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T11:01:25.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:24 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.109 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T11:01:25.109 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":2,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","modified":"2026-03-10T11:01:19.450963Z","created":"2026-03-10T11:00:12.539852Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm07","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:3300","nonce":0},{"type":"v1","addr":"192.168.123.107:6789","nonce":0}]},"addr":"192.168.123.107:6789/0","public_addr":"192.168.123.107:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]}
2026-03-10T11:01:25.109 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 2
2026-03-10T11:01:25.269 INFO:tasks.cephadm:Generating final ceph.conf file...
2026-03-10T11:01:25.269 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph config generate-minimal-conf
2026-03-10T11:01:25.467 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:25.731 INFO:teuthology.orchestra.run.vm06.stdout:# minimal ceph.conf for 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:01:25.732 INFO:teuthology.orchestra.run.vm06.stdout:[global]
2026-03-10T11:01:25.732 INFO:teuthology.orchestra.run.vm06.stdout: fsid = 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:01:25.732 INFO:teuthology.orchestra.run.vm06.stdout: mon_host = [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0]
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: mgrmap e18: vm06.luxohm(active, since 15s), standbys: vm07.rgmael
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr metadata", "who": "vm07.rgmael", "id": "vm07.rgmael"}]: dispatch
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/2212142355' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T11:01:25.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:25 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:25.890 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring...
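The generated minimal conf above is a direct projection of the epoch-2 monmap: `fsid` plus one bracketed addrvec per mon. A sketch that rebuilds the same `[global]` block from the `ceph mon dump -f json` output (formatting matched to the log's generated file; this is not cephadm's internal code):

```python
import json

def minimal_conf(monmap: dict) -> str:
    # Render each mon's public addrvec as [v2:ip:3300/0,v1:ip:6789/0],
    # the shape shown in the generated ceph.conf above.
    hosts = []
    for mon in monmap["mons"]:
        vec = ",".join(
            f"{a['type']}:{a['addr']}/{a['nonce']}"
            for a in mon["public_addrs"]["addrvec"])
        hosts.append(f"[{vec}]")
    return (
        f"# minimal ceph.conf for {monmap['fsid']}\n"
        "[global]\n"
        f"\tfsid = {monmap['fsid']}\n"
        f"\tmon_host = {' '.join(hosts)}\n")

# monmap = json.loads(out)  # out = stdout of `ceph mon dump -f json`
```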
2026-03-10T11:01:25.890 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-10T11:01:25.890 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.conf
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: mgrmap e18: vm06.luxohm(active, since 15s), standbys: vm07.rgmael
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr metadata", "who": "vm07.rgmael", "id": "vm07.rgmael"}]: dispatch
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/2212142355' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T11:01:25.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:25 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:25.923 DEBUG:teuthology.orchestra.run.vm06:> set -ex
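The `set -ex` / `sudo dd of=...` pairs around this point (ceph.conf above, the admin keyring just below) are how the file push works: the payload travels on the remote command's stdin and `dd` writes it out as root, so no shell redirection has to happen under sudo. A rough equivalent over plain ssh (the helper name is illustrative, not teuthology's API):

```python
import subprocess

def sudo_write(host: str, path: str, data: str) -> None:
    # Feed the payload to the remote command's stdin; dd copies stdin
    # to the target path with root privileges, mirroring the log's
    # `set -ex` + `sudo dd of=<path>` command pairs.
    subprocess.run(
        ["ssh", host, f"set -ex\nsudo dd of={path}"],
        input=data, text=True, check=True)

# e.g. sudo_write("vm06.local", "/etc/ceph/ceph.conf", conf_text)
```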
2026-03-10T11:01:25.923 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T11:01:25.989 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T11:01:25.990 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/ceph/ceph.conf
2026-03-10T11:01:26.016 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T11:01:26.016 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T11:01:26.081 INFO:tasks.cephadm:Deploying OSDs...
2026-03-10T11:01:26.081 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-10T11:01:26.081 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout
2026-03-10T11:01:26.098 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T11:01:26.098 DEBUG:teuthology.orchestra.run.vm06:> ls /dev/[sv]d?
2026-03-10T11:01:26.158 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vda
2026-03-10T11:01:26.158 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdb
2026-03-10T11:01:26.158 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdc
2026-03-10T11:01:26.158 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdd
2026-03-10T11:01:26.158 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vde
2026-03-10T11:01:26.159 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-10T11:01:26.159 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-10T11:01:26.159 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdb
2026-03-10T11:01:26.214 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdb
2026-03-10T11:01:26.214 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T11:01:26.214 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10
2026-03-10T11:01:26.214 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T11:01:26.214 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T11:01:26.214 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-10 11:00:43.939045516 +0000
2026-03-10T11:01:26.214 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-10 10:59:13.974033214 +0000
2026-03-10T11:01:26.214 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-10 10:59:13.974033214 +0000
2026-03-10T11:01:26.214 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-10 10:56:27.318000000 +0000
2026-03-10T11:01:26.215 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-10T11:01:26.276 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-10T11:01:26.276 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-10T11:01:26.276 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000108182 s, 4.7 MB/s
2026-03-10T11:01:26.277 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-10T11:01:26.340 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdc
2026-03-10T11:01:26.399 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdc
2026-03-10T11:01:26.399 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T11:01:26.399 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20
2026-03-10T11:01:26.399 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T11:01:26.399 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T11:01:26.399 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-10 11:00:43.989045569 +0000
2026-03-10T11:01:26.399 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-10 10:59:13.969033210 +0000
2026-03-10T11:01:26.399 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-10 10:59:13.969033210 +0000
2026-03-10T11:01:26.399 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-10 10:56:27.326000000 +0000
2026-03-10T11:01:26.399 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-10T11:01:26.468 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-10T11:01:26.468 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-10T11:01:26.468 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000167734 s, 3.1 MB/s
2026-03-10T11:01:26.469 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-10T11:01:26.529 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdd
2026-03-10T11:01:26.585 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdd
2026-03-10T11:01:26.585 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T11:01:26.585 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30
2026-03-10T11:01:26.585 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T11:01:26.585 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T11:01:26.585 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-10 11:00:44.028045611 +0000
2026-03-10T11:01:26.585 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-10 10:59:13.984033224 +0000
2026-03-10T11:01:26.585 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-10 10:59:13.984033224 +0000
2026-03-10T11:01:26.585 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-10 10:56:27.330000000 +0000
2026-03-10T11:01:26.585 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-10T11:01:26.647 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-10T11:01:26.647 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-10T11:01:26.647 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000151353 s, 3.4 MB/s
2026-03-10T11:01:26.648 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-10T11:01:26.709 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vde
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: Updating vm06:/etc/ceph/ceph.conf
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: Updating vm07:/etc/ceph/ceph.conf
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: Updating vm07:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: Updating vm06:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: Reconfiguring mon.vm06 (unknown last config time)...
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: Reconfiguring daemon mon.vm06 on vm06
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/572222707' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: Reconfiguring mgr.vm06.luxohm (unknown last config time)...
2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm06.luxohm", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: Reconfiguring daemon mgr.vm06.luxohm on vm06 2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T11:01:26.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:26 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:01:26.770 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vde 2026-03-10T11:01:26.770 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T11:01:26.770 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-10T11:01:26.770 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T11:01:26.770 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T11:01:26.770 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-10 11:00:44.064045650 +0000 2026-03-10T11:01:26.770 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-10 10:59:13.973033213 +0000 2026-03-10T11:01:26.770 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-10 10:59:13.973033213 +0000 2026-03-10T11:01:26.770 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-10 10:56:27.338000000 +0000 2026-03-10T11:01:26.770 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-10T11:01:26.842 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-10T11:01:26.842 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-10T11:01:26.842 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000157305 s, 3.3 MB/s 2026-03-10T11:01:26.843 DEBUG:teuthology.orchestra.run.vm06:> ! 
2026-03-10T11:01:26.903 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T11:01:26.903 DEBUG:teuthology.orchestra.run.vm07:> dd if=/scratch_devs of=/dev/stdout
2026-03-10T11:01:26.915 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: Updating vm06:/etc/ceph/ceph.conf
2026-03-10T11:01:26.915 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: Updating vm07:/etc/ceph/ceph.conf
2026-03-10T11:01:26.915 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: Updating vm07:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: Updating vm06:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/config/ceph.conf
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: Reconfiguring mon.vm06 (unknown last config time)...
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: Reconfiguring daemon mon.vm06 on vm06
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/572222707' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: Reconfiguring mgr.vm06.luxohm (unknown last config time)...
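
The scratch-device probe that just ran against vm06 (and is repeated below for every candidate disk on vm07) is a fixed three-step pattern: stat the node, read a single sector with dd, and require that nothing has the device mounted. A minimal standalone sketch of that pattern, with the device list assumed from this run rather than discovered dynamically:

    #!/usr/bin/env bash
    # Sketch of the scratch-device probe seen in the log above; the device
    # names are assumptions taken from this run, not auto-detected.
    set -e
    for dev in /dev/vdb /dev/vdc /dev/vdd /dev/vde; do
        stat "$dev"                             # node must exist
        sudo dd if="$dev" of=/dev/null count=1  # and be readable (one 512 B sector)
        if mount | grep -v devtmpfs | grep -q "$dev"; then
            echo "$dev is mounted, not usable as a scratch disk" >&2
            exit 1                              # scratch disks must be unmounted
        fi
    done
    echo "all scratch devices look usable"
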
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm06.luxohm", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: Reconfiguring daemon mgr.vm06.luxohm on vm06
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-10T11:01:26.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:26 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:26.918 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T11:01:26.918 DEBUG:teuthology.orchestra.run.vm07:> ls /dev/[sv]d?
2026-03-10T11:01:26.973 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vda
2026-03-10T11:01:26.973 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vdb
2026-03-10T11:01:26.973 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vdc
2026-03-10T11:01:26.973 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vdd
2026-03-10T11:01:26.973 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vde
2026-03-10T11:01:26.973 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-10T11:01:26.973 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-10T11:01:26.973 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vdb
2026-03-10T11:01:27.029 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vdb
2026-03-10T11:01:27.029 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T11:01:27.029 INFO:teuthology.orchestra.run.vm07.stdout:Device: 6h/6d Inode: 223 Links: 1 Device type: fc,10
2026-03-10T11:01:27.029 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T11:01:27.029 INFO:teuthology.orchestra.run.vm07.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T11:01:27.029 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-10 11:01:11.301621545 +0000
2026-03-10T11:01:27.029 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-10 10:59:13.301095572 +0000
2026-03-10T11:01:27.029 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-10 10:59:13.301095572 +0000
2026-03-10T11:01:27.029 INFO:teuthology.orchestra.run.vm07.stdout: Birth: 2026-03-10 10:55:56.302000000 +0000
2026-03-10T11:01:27.029 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-10T11:01:27.091 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in
2026-03-10T11:01:27.091 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out
2026-03-10T11:01:27.091 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000159538 s, 3.2 MB/s
2026-03-10T11:01:27.092 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
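
Reading /scratch_devs failed on vm07 (the "got remote process result: 1" above), so teuthology falls back to globbing /dev/[sv]d? and dropping the root device before probing each remaining disk. A rough shell equivalent of that fallback; detecting the root disk via findmnt is an assumption of this sketch (teuthology.misc does the filtering in Python):

    # Rough equivalent of the fallback device enumeration logged above.
    root_src=$(findmnt -n -o SOURCE /)   # e.g. /dev/vda1 -> root lives on vda
    devs=()
    for d in /dev/[sv]d?; do
        case "$root_src" in
            "$d"*) echo "Removing root device: $d from device list" >&2 ;;
            *)     devs+=("$d") ;;
        esac
    done
    echo "devs=${devs[*]}"               # -> devs=/dev/vdb /dev/vdc /dev/vdd /dev/vde
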
2026-03-10T11:01:27.148 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vdc
2026-03-10T11:01:27.205 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vdc
2026-03-10T11:01:27.205 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T11:01:27.205 INFO:teuthology.orchestra.run.vm07.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20
2026-03-10T11:01:27.205 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T11:01:27.205 INFO:teuthology.orchestra.run.vm07.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T11:01:27.205 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-10 11:01:11.325621573 +0000
2026-03-10T11:01:27.205 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-10 10:59:13.274095539 +0000
2026-03-10T11:01:27.205 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-10 10:59:13.274095539 +0000
2026-03-10T11:01:27.205 INFO:teuthology.orchestra.run.vm07.stdout: Birth: 2026-03-10 10:55:56.314000000 +0000
2026-03-10T11:01:27.206 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-10T11:01:27.267 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in
2026-03-10T11:01:27.267 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out
2026-03-10T11:01:27.267 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000155471 s, 3.3 MB/s
2026-03-10T11:01:27.268 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-10T11:01:27.324 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vdd
2026-03-10T11:01:27.378 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vdd
2026-03-10T11:01:27.378 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T11:01:27.378 INFO:teuthology.orchestra.run.vm07.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30
2026-03-10T11:01:27.378 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T11:01:27.378 INFO:teuthology.orchestra.run.vm07.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T11:01:27.378 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-10 11:01:11.351621603 +0000
2026-03-10T11:01:27.378 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-10 10:59:13.282095549 +0000
2026-03-10T11:01:27.378 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-10 10:59:13.282095549 +0000
2026-03-10T11:01:27.378 INFO:teuthology.orchestra.run.vm07.stdout: Birth: 2026-03-10 10:55:56.322000000 +0000
2026-03-10T11:01:27.379 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-10T11:01:27.440 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in
2026-03-10T11:01:27.440 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out
2026-03-10T11:01:27.440 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000150261 s, 3.4 MB/s
2026-03-10T11:01:27.441 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-10T11:01:27.497 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vde
2026-03-10T11:01:27.553 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vde
2026-03-10T11:01:27.553 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T11:01:27.553 INFO:teuthology.orchestra.run.vm07.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40
2026-03-10T11:01:27.553 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T11:01:27.553 INFO:teuthology.orchestra.run.vm07.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T11:01:27.553 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-10 11:01:11.374621630 +0000
2026-03-10T11:01:27.553 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-10 10:59:13.245095503 +0000
2026-03-10T11:01:27.553 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-10 10:59:13.245095503 +0000
2026-03-10T11:01:27.553 INFO:teuthology.orchestra.run.vm07.stdout: Birth: 2026-03-10 10:55:56.331000000 +0000
2026-03-10T11:01:27.553 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-10T11:01:27.617 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in
2026-03-10T11:01:27.617 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out
2026-03-10T11:01:27.617 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000156593 s, 3.3 MB/s
2026-03-10T11:01:27.618 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-10T11:01:27.674 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch apply osd --all-available-devices
2026-03-10T11:01:27.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: Reconfiguring ceph-exporter.vm06 (monmap changed)...
2026-03-10T11:01:27.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: Reconfiguring daemon ceph-exporter.vm06 on vm06
2026-03-10T11:01:27.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:27.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:27.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: Reconfiguring crash.vm06 (monmap changed)...
2026-03-10T11:01:27.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T11:01:27.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:27.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: Reconfiguring daemon crash.vm06 on vm06
2026-03-10T11:01:27.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:27.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:27.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: Reconfiguring alertmanager.vm06 (dependencies changed)...
2026-03-10T11:01:27.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:27 vm06 ceph-mon[49534]: Reconfiguring daemon alertmanager.vm06 on vm06
2026-03-10T11:01:27.870 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm07/config
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: Reconfiguring ceph-exporter.vm06 (monmap changed)...
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: Reconfiguring daemon ceph-exporter.vm06 on vm06
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: Reconfiguring crash.vm06 (monmap changed)...
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: Reconfiguring daemon crash.vm06 on vm06
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: Reconfiguring alertmanager.vm06 (dependencies changed)...
2026-03-10T11:01:27.942 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:27 vm07 ceph-mon[56438]: Reconfiguring daemon alertmanager.vm06 on vm06
2026-03-10T11:01:28.106 INFO:teuthology.orchestra.run.vm07.stdout:Scheduled osd.all-available-devices update...
2026-03-10T11:01:28.251 INFO:tasks.cephadm:Waiting for 8 OSDs to come up...
2026-03-10T11:01:28.251 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:28.527 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:28.767 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:28.918 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-03-10T11:01:29.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:29 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:29.362 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:29 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:29.362 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:29 vm06 ceph-mon[49534]: Reconfiguring grafana.vm06 (dependencies changed)...
2026-03-10T11:01:29.362 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:29 vm06 ceph-mon[49534]: Reconfiguring daemon grafana.vm06 on vm06
2026-03-10T11:01:29.362 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:29 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:29.362 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:29 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2736213894' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:29.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:29 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:29.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:29 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:29.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:29 vm07 ceph-mon[56438]: Reconfiguring grafana.vm06 (dependencies changed)...
2026-03-10T11:01:29.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:29 vm07 ceph-mon[56438]: Reconfiguring daemon grafana.vm06 on vm06
2026-03-10T11:01:29.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:29 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:29.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:29 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2736213894' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
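
From this point the cephadm task polls `ceph osd stat -f json` roughly once a second until the expected OSD count is reached; every DEBUG line that follows repeats the same cephadm shell invocation. A hedged sketch of that wait loop, reusing the exact invocation from the log (jq for the JSON parsing is an assumption of this sketch; the task itself parses the output in Python and may apply stricter up/in checks):

    # Sketch of the "Waiting for 8 OSDs" poll loop seen in this log.
    FSID=2d4d1532-1c70-11f1-9ee5-8d2ac270c240
    IMAGE=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
    want=8
    while :; do
        n=$(sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell \
              -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
              --fsid "$FSID" -- ceph osd stat -f json | jq .num_osds)
        [ "$n" -ge "$want" ] && break   # all expected OSDs registered in the osdmap
        sleep 1
    done
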
2026-03-10T11:01:29.919 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:30.215 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:30.240 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:30 vm06 ceph-mon[49534]: from='client.14254 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:01:30.240 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:30 vm06 ceph-mon[49534]: Marking host: vm06 for OSDSpec preview refresh.
2026-03-10T11:01:30.240 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:30 vm06 ceph-mon[49534]: Marking host: vm07 for OSDSpec preview refresh.
2026-03-10T11:01:30.240 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:30 vm06 ceph-mon[49534]: Saving service osd.all-available-devices spec with placement *
2026-03-10T11:01:30.240 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:30 vm06 ceph-mon[49534]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:30.240 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:30 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:30.240 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:30 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:30.240 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:30 vm06 ceph-mon[49534]: Reconfiguring prometheus.vm06 (dependencies changed)...
2026-03-10T11:01:30.240 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:30 vm06 ceph-mon[49534]: Reconfiguring daemon prometheus.vm06 on vm06
2026-03-10T11:01:30.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:30 vm07 ceph-mon[56438]: from='client.14254 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:01:30.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:30 vm07 ceph-mon[56438]: Marking host: vm06 for OSDSpec preview refresh.
2026-03-10T11:01:30.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:30 vm07 ceph-mon[56438]: Marking host: vm07 for OSDSpec preview refresh.
2026-03-10T11:01:30.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:30 vm07 ceph-mon[56438]: Saving service osd.all-available-devices spec with placement *
2026-03-10T11:01:30.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:30 vm07 ceph-mon[56438]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:30.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:30 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:30.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:30 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:30.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:30 vm07 ceph-mon[56438]: Reconfiguring prometheus.vm06 (dependencies changed)...
2026-03-10T11:01:30.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:30 vm07 ceph-mon[56438]: Reconfiguring daemon prometheus.vm06 on vm06
2026-03-10T11:01:30.502 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:30.655 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-03-10T11:01:31.655 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/45209519' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: Reconfiguring ceph-exporter.vm07 (monmap changed)...
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm07", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: Reconfiguring daemon ceph-exporter.vm07 on vm07
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: Reconfiguring crash.vm07 (monmap changed)...
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm07", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: Reconfiguring daemon crash.vm07 on vm07
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm07.rgmael", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T11:01:31.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:31 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:31.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/45209519' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:31.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: Reconfiguring ceph-exporter.vm07 (monmap changed)...
2026-03-10T11:01:31.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm07", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-10T11:01:31.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: Reconfiguring daemon ceph-exporter.vm07 on vm07
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: Reconfiguring crash.vm07 (monmap changed)...
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm07", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: Reconfiguring daemon crash.vm07 on vm07
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm07.rgmael", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T11:01:31.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:31 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:31.821 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:32.077 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:32.248 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: Reconfiguring mgr.vm07.rgmael (monmap changed)...
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: Reconfiguring daemon mgr.vm07.rgmael on vm07
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: Reconfiguring mon.vm07 (monmap changed)...
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: Reconfiguring daemon mon.vm07 on vm07
2026-03-10T11:01:32.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch
2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch
cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:01:32.734 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:32 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1816708394' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: Reconfiguring mgr.vm07.rgmael (monmap changed)... 
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: Reconfiguring daemon mgr.vm07.rgmael on vm07
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: Reconfiguring mon.vm07 (monmap changed)...
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: Reconfiguring daemon mon.vm07 on vm07
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:01:32.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:32 vm07 ceph-mon[56438]: from='client.? 
2026-03-10T11:01:33.248 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:33.502 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:33.777 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:33.932 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T11:01:34.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:33 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T11:01:34.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:34.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T11:01:34.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:33 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:34.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:34 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1118800489' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:34.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:34 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/2201713366' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9b76adb1-8a8b-4cad-8b2b-f9ead69e56db"}]: dispatch
2026-03-10T11:01:34.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:34 vm06 ceph-mon[49534]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9b76adb1-8a8b-4cad-8b2b-f9ead69e56db"}]: dispatch
2026-03-10T11:01:34.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:34 vm06 ceph-mon[49534]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9b76adb1-8a8b-4cad-8b2b-f9ead69e56db"}]': finished
2026-03-10T11:01:34.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:34 vm06 ceph-mon[49534]: osdmap e6: 1 total, 0 up, 1 in
2026-03-10T11:01:34.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:34 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:34.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:34 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3906670243' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6dd37bb9-b004-4153-bb97-565b64a9f2d7"}]: dispatch
2026-03-10T11:01:34.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:34 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3906670243' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "6dd37bb9-b004-4153-bb97-565b64a9f2d7"}]': finished
2026-03-10T11:01:34.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:34 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1118800489' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:34.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:34 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/2201713366' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9b76adb1-8a8b-4cad-8b2b-f9ead69e56db"}]: dispatch
2026-03-10T11:01:34.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:34 vm07 ceph-mon[56438]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9b76adb1-8a8b-4cad-8b2b-f9ead69e56db"}]: dispatch
2026-03-10T11:01:34.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:34 vm07 ceph-mon[56438]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9b76adb1-8a8b-4cad-8b2b-f9ead69e56db"}]': finished
2026-03-10T11:01:34.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:34 vm07 ceph-mon[56438]: osdmap e6: 1 total, 0 up, 1 in
2026-03-10T11:01:34.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:34 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:34.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:34 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3906670243' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6dd37bb9-b004-4153-bb97-565b64a9f2d7"}]: dispatch
2026-03-10T11:01:34.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:34 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3906670243' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "6dd37bb9-b004-4153-bb97-565b64a9f2d7"}]': finished
2026-03-10T11:01:34.933 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:35.134 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:35.380 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:35.553 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1773140494,"num_remapped_pgs":0}
2026-03-10T11:01:35.721 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:35 vm06 ceph-mon[49534]: osdmap e7: 2 total, 0 up, 2 in
2026-03-10T11:01:35.721 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:35 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:35.721 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:35 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:35.721 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:35 vm06 ceph-mon[49534]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:35.721 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:35 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/2200847713' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:35.721 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:35 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1046638853' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:35.721 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:35 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3907028009' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
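
Each polled JSON blob maps one-to-one onto the mons' osdmap lines: {"epoch":7,"num_osds":2,"num_up_osds":0,...} above describes the same state as "osdmap e7: 2 total, 0 up, 2 in", i.e. two OSDs created and marked in but none booted yet. Decoding a captured poll result (jq again an assumption, not part of this run):

    # Decode one poll result; the JSON literal is copied from the log above.
    echo '{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1773140494,"num_remapped_pgs":0}' |
      jq -r '"osdmap e\(.epoch): \(.num_osds) total, \(.num_up_osds) up, \(.num_in_osds) in"'
    # -> osdmap e7: 2 total, 0 up, 2 in
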
2026-03-10T11:01:36.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:35 vm07 ceph-mon[56438]: osdmap e7: 2 total, 0 up, 2 in
2026-03-10T11:01:36.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:35 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:36.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:35 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:36.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:35 vm07 ceph-mon[56438]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:36.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:35 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/2200847713' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:36.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:35 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1046638853' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:36.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:35 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3907028009' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:36.554 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:36.732 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:36.962 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:37.102 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:37 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2780228848' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:37.123 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1773140494,"num_remapped_pgs":0}
2026-03-10T11:01:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:37 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2780228848' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:37 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2780228848' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:38.124 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:38.245 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:38 vm06 ceph-mon[49534]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:38.289 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:38 vm07 ceph-mon[56438]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:38.304 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:38.576 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:38.725 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1773140498,"num_remapped_pgs":0}
2026-03-10T11:01:39.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/2786210486' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6f8f1c30-5206-4f4f-9a3e-10f967c47c15"}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6f8f1c30-5206-4f4f-9a3e-10f967c47c15"}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "6f8f1c30-5206-4f4f-9a3e-10f967c47c15"}]': finished
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: osdmap e8: 3 total, 0 up, 3 in
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2207434150' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "794f92ac-9222-4621-99c2-6fec07b073f7"}]: dispatch
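The alternating DEBUG/stdout pairs above are the test harness polling the cluster while OSDs are created: each round execs `ceph osd stat -f json` inside a cephadm shell and reads back the JSON blob (num_osds has just climbed from 2 to 4). A minimal sketch of such a wait loop, assuming the exact cephadm invocation shown in the log; the helper names and the timeout are illustrative, not teuthology's actual code:

    import json
    import subprocess
    import time

    # Invocation copied from the DEBUG lines above; only the helpers below are hypothetical.
    CEPHADM = [
        "sudo", "/home/ubuntu/cephtest/cephadm",
        "--image", "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
        "shell", "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", "2d4d1532-1c70-11f1-9ee5-8d2ac270c240", "--",
    ]

    def osd_stat() -> dict:
        # One polling round: run `ceph osd stat -f json` in a cephadm shell and
        # parse the JSON that appears on stdout in the log.
        out = subprocess.check_output(CEPHADM + ["ceph", "osd", "stat", "-f", "json"])
        return json.loads(out)

    def wait_for_osds(want: int, timeout: float = 300.0) -> dict:
        # Poll roughly once per second, as the log shows, until `want` OSDs exist.
        deadline = time.time() + timeout
        while True:
            stat = osd_stat()
            if stat["num_osds"] >= want:
                return stat
            if time.time() > deadline:
                raise TimeoutError(f"only {stat['num_osds']}/{want} OSDs created")
            time.sleep(1)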
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2207434150' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "794f92ac-9222-4621-99c2-6fec07b073f7"}]': finished
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: osdmap e9: 4 total, 0 up, 4 in
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3015599382' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/1686145450' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3308376857' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:39.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:39 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/2786210486' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6f8f1c30-5206-4f4f-9a3e-10f967c47c15"}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6f8f1c30-5206-4f4f-9a3e-10f967c47c15"}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "6f8f1c30-5206-4f4f-9a3e-10f967c47c15"}]': finished
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: osdmap e8: 3 total, 0 up, 3 in
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2207434150' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "794f92ac-9222-4621-99c2-6fec07b073f7"}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2207434150' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "794f92ac-9222-4621-99c2-6fec07b073f7"}]': finished
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: osdmap e9: 4 total, 0 up, 4 in
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3015599382' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/1686145450' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3308376857' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:39 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:01:39.725 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:39.889 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:40.148 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:40.282 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:40 vm06 ceph-mon[49534]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:40.304 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1773140498,"num_remapped_pgs":0}
2026-03-10T11:01:40.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:40 vm07 ceph-mon[56438]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:41.305 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:41 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2627338782' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:41.351 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:41 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2627338782' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:41.477 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:41.697 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:41.868 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1773140498,"num_remapped_pgs":0}
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1622133791' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/885212586' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "99c67954-641c-498f-8710-8bc301498be2"}]: dispatch
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "99c67954-641c-498f-8710-8bc301498be2"}]: dispatch
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "99c67954-641c-498f-8710-8bc301498be2"}]': finished
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: osdmap e10: 5 total, 0 up, 5 in
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:42.241 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:42 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1622133791' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/885212586' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "99c67954-641c-498f-8710-8bc301498be2"}]: dispatch
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "99c67954-641c-498f-8710-8bc301498be2"}]: dispatch
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "99c67954-641c-498f-8710-8bc301498be2"}]': finished
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: osdmap e10: 5 total, 0 up, 5 in
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:42.297 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:42 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:42.869 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:43.049 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:43.112 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/1103436972' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3131649630' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bcfcb47d-2086-4ffb-a7c2-bb986dc3cb6b"}]: dispatch
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3131649630' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bcfcb47d-2086-4ffb-a7c2-bb986dc3cb6b"}]': finished
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: osdmap e11: 6 total, 0 up, 6 in
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:43.113 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:43 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1822490277' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:43.275 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/1103436972' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3131649630' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bcfcb47d-2086-4ffb-a7c2-bb986dc3cb6b"}]: dispatch
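Each `osd new <uuid>` round in the mon audit lines above is a freshly prepared device being registered: the preparer authenticates as client.bootstrap-osd, fetches the monmap (`mon getmap`), then asks the mons to bind an OSD id to the device's uuid, which is why every `finished` line is followed by an osdmap epoch bump and a higher `N total` count. A hedged sketch of the equivalent call, reusing a uuid from the log; the keyring path is the conventional host location and may differ inside these containers:

    import subprocess

    def allocate_osd_id(osd_uuid: str) -> int:
        # `ceph osd new <uuid>` allocates (or, if the uuid is already registered,
        # returns again) the OSD id bound to that uuid, using the same
        # bootstrap-osd identity seen in the dispatch lines above.
        out = subprocess.check_output([
            "ceph", "-n", "client.bootstrap-osd",
            "-k", "/var/lib/ceph/bootstrap-osd/ceph.keyring",
            "osd", "new", osd_uuid,
        ])
        return int(out)

    # e.g. the round logged at 11:01:43:
    osd_id = allocate_osd_id("bcfcb47d-2086-4ffb-a7c2-bb986dc3cb6b")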
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3131649630' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bcfcb47d-2086-4ffb-a7c2-bb986dc3cb6b"}]': finished
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: osdmap e11: 6 total, 0 up, 6 in
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:43.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:43 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1822490277' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:43.446 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1773140502,"num_remapped_pgs":0}
2026-03-10T11:01:44.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:44 vm07 ceph-mon[56438]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:44.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:44 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1860130714' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:44.447 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:44.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:44 vm06 ceph-mon[49534]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:44.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:44 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1860130714' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:44.622 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:44.855 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:45.039 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1773140502,"num_remapped_pgs":0}
2026-03-10T11:01:45.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:45 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/572777773' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:45.339 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:45 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/572777773' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:46.040 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:46.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:46.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/3266394210' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fe73d692-18c6-496a-8ee3-d10ebe2e97c5"}]: dispatch
2026-03-10T11:01:46.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fe73d692-18c6-496a-8ee3-d10ebe2e97c5"}]: dispatch
2026-03-10T11:01:46.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fe73d692-18c6-496a-8ee3-d10ebe2e97c5"}]': finished
2026-03-10T11:01:46.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: osdmap e12: 7 total, 0 up, 7 in
2026-03-10T11:01:46.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:46.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:46.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:46.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:46.151 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:46.151 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:46.151 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:46.151 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:46 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/1779127323' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:46.272 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:46.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/3266394210' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fe73d692-18c6-496a-8ee3-d10ebe2e97c5"}]: dispatch
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fe73d692-18c6-496a-8ee3-d10ebe2e97c5"}]: dispatch
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fe73d692-18c6-496a-8ee3-d10ebe2e97c5"}]': finished
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: osdmap e12: 7 total, 0 up, 7 in
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:46 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/1779127323' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:46.543 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:46.727 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0}
2026-03-10T11:01:47.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/877692585' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "11b14356-8f96-4856-878c-cac368917846"}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/877692585' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "11b14356-8f96-4856-878c-cac368917846"}]': finished
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: osdmap e13: 8 total, 0 up, 8 in
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2127426545' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:47.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:47 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/4075667729' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:47.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/877692585' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "11b14356-8f96-4856-878c-cac368917846"}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/877692585' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "11b14356-8f96-4856-878c-cac368917846"}]': finished
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: osdmap e13: 8 total, 0 up, 8 in
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2127426545' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:47.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:47 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/4075667729' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T11:01:47.728 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:47.899 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:48.122 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:48.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:48 vm06 ceph-mon[49534]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:48.293 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0}
2026-03-10T11:01:48.406 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:48 vm07 ceph-mon[56438]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:49.294 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:49.316 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:49 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1171867317' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:49.327 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:49 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1171867317' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:49.547 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:49.788 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:49.947 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0}
2026-03-10T11:01:50.102 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:50 vm06 ceph-mon[49534]: pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:50.102 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:50 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-10T11:01:50.102 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:50 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:50.102 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:50 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2072840287' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:50.125 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:50 vm07 ceph-mon[56438]: pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:50.125 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:50 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-10T11:01:50.125 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:50 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:50.125 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:50 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2072840287' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:50.948 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:51.188 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:51.218 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:51 vm06 ceph-mon[49534]: Deploying daemon osd.0 on vm07
2026-03-10T11:01:51.218 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:51 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T11:01:51.218 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:51 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:51.408 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:51 vm07 ceph-mon[56438]: Deploying daemon osd.0 on vm07
2026-03-10T11:01:51.408 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:51 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T11:01:51.408 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:51 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:51.584 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:51.744 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0}
2026-03-10T11:01:52.237 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:52 vm07 ceph-mon[56438]: Deploying daemon osd.1 on vm06
2026-03-10T11:01:52.237 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:52 vm07 ceph-mon[56438]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
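From this point the mgr turns each registered id into a running daemon: for every osd.N it fetches the daemon's key (`auth get`) and a minimal ceph.conf (`config generate-minimal-conf`), then cephadm materializes the container, which the mons record as `Deploying daemon osd.N on vmXX`. A sketch of that per-daemon payload, assuming a trivial `ceph()` CLI wrapper; the dict shape is illustrative, not cephadm's actual wire format:

    import subprocess

    def ceph(*args: str) -> str:
        # Thin CLI wrapper, for readability only.
        return subprocess.check_output(("ceph",) + args).decode()

    def deploy_payload(osd_id: int) -> dict:
        # The two mon commands audited above for each osd.N, just before the
        # corresponding "Deploying daemon osd.N" line appears.
        return {
            "keyring": ceph("auth", "get", f"osd.{osd_id}"),
            "config": ceph("config", "generate-minimal-conf"),
        }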
2026-03-10T11:01:52.237 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:52 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2190458732' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:52.237 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:52 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:52.237 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:52 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:52.237 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:52 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T11:01:52.237 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:52 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:52.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:52 vm06 ceph-mon[49534]: Deploying daemon osd.1 on vm06
2026-03-10T11:01:52.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:52 vm06 ceph-mon[49534]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:52.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:52 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2190458732' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:52.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:52 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:52.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:52 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:52.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:52 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T11:01:52.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:52 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:52.745 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:52.966 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:53.237 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:53 vm06 ceph-mon[49534]: Deploying daemon osd.2 on vm07
2026-03-10T11:01:53.237 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:53 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:53.237 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:53 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:53.237 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:53 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-10T11:01:53.237 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:53 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:53.243 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:53.404 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0}
2026-03-10T11:01:53.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:53 vm07 ceph-mon[56438]: Deploying daemon osd.2 on vm07
2026-03-10T11:01:53.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:53 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:53.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:53 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:53.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:53 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-10T11:01:53.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:53 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: Deploying daemon osd.3 on vm06
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/809796446' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: Deploying daemon osd.4 on vm07
2026-03-10T11:01:54.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:54 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: Deploying daemon osd.3 on vm06
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/809796446' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: Deploying daemon osd.4 on vm07
2026-03-10T11:01:54.298 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:54 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:01:54.405 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:54.653 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:54.954 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:55.138 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":14,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0}
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: osdmap e14: 8 total, 0 up, 8 in
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: pgmap v25: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T11:01:55.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:55 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1818080030' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: osdmap e14: 8 total, 0 up, 8 in
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T11:01:55.348 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:55 vm07 ceph-mon[56438]: pgmap v25: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
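The `osd crush set-device-class` / `osd crush create-or-move` dispatches are each OSD registering itself in the CRUSH map at startup under host=vmXX, root=default. The logged weight of 0.0195 is consistent with the usual convention that a CRUSH weight is the device capacity in TiB, i.e. roughly a 20 GiB device (20/1024 ≈ 0.0195); reading it that way is an inference from the value, not something the log states:

    def crush_weight(size_bytes: int) -> float:
        # CRUSH weights conventionally express device capacity in TiB.
        return round(size_bytes / 2**40, 4)

    assert crush_weight(20 * 2**30) == 0.0195  # a ~20 GiB device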
192.168.123.106:0/1818080030' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T11:01:56.139 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: osdmap e15: 8 total, 0 up, 8 in 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: Deploying daemon osd.5 on vm06
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0'
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055] boot
2026-03-10T11:01:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: osdmap e16: 8 total, 1 up, 8 in
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:56.167 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:56 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: osdmap e15: 8 total, 0 up, 8 in
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: Deploying daemon osd.5 on vm06
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055]' entity='osd.0'
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-10T11:01:56.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: osd.0 [v2:192.168.123.107:6800/2148012055,v1:192.168.123.107:6801/2148012055] boot
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: osdmap e16: 8 total, 1 up, 8 in
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:56.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:56 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:56.374 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:56.699 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:56.905 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":16,"num_osds":8,"num_up_osds":1,"osd_up_since":1773140516,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0}
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: purged_snaps scrub starts
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: purged_snaps scrub ok
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: purged_snaps scrub starts
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: purged_snaps scrub ok
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: Deploying daemon osd.6 on vm07
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3494573652' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: pgmap v28: 0 pgs: ; 0 B data, 246 MiB used, 20 GiB / 20 GiB avail
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1'
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: osdmap e17: 8 total, 1 up, 8 in
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:57.643 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:57 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: purged_snaps scrub starts
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: purged_snaps scrub ok
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: purged_snaps scrub starts
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: purged_snaps scrub ok
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: Deploying daemon osd.6 on vm07
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3494573652' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: pgmap v28: 0 pgs: ; 0 B data, 246 MiB used, 20 GiB / 20 GiB avail
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427]' entity='osd.1'
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: osdmap e17: 8 total, 1 up, 8 in
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:57.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:57 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:57.906 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:58.077 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:01:58.362 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:01:58.541 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":18,"num_osds":8,"num_up_osds":3,"osd_up_since":1773140518,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0}
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427] boot
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138] boot
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: osdmap e18: 8 total, 3 up, 8 in
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:58.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1303919334' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='osd.4 [v2:192.168.123.107:6816/2858289592,v1:192.168.123.107:6817/2858289592]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch
2026-03-10T11:01:58.667 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:58 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: osd.1 [v2:192.168.123.106:6802/1755791427,v1:192.168.123.106:6803/1755791427] boot
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: osd.2 [v2:192.168.123.107:6808/2086159138,v1:192.168.123.107:6809/2086159138] boot
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: osdmap e18: 8 total, 3 up, 8 in
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1303919334' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='osd.4 [v2:192.168.123.107:6816/2858289592,v1:192.168.123.107:6817/2858289592]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch
2026-03-10T11:01:58.716 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:58 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:01:59.542 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:01:59.826 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: purged_snaps scrub starts
2026-03-10T11:01:59.826 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: purged_snaps scrub ok
2026-03-10T11:01:59.826 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: Deploying daemon osd.7 on vm06
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: pgmap v31: 0 pgs: ; 0 B data, 246 MiB used, 20 GiB / 20 GiB avail
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='osd.4 [v2:192.168.123.107:6816/2858289592,v1:192.168.123.107:6817/2858289592]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: osdmap e19: 8 total, 3 up, 8 in
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:59.827 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:01:59 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: purged_snaps scrub starts
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: purged_snaps scrub ok
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: Deploying daemon osd.7 on vm06
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: pgmap v31: 0 pgs: ; 0 B data, 246 MiB used, 20 GiB / 20 GiB avail
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='osd.4 [v2:192.168.123.107:6816/2858289592,v1:192.168.123.107:6817/2858289592]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: osdmap e19: 8 total, 3 up, 8 in
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:01:59.867 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:01:59 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-10T11:01:59.949 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:00.490 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: purged_snaps scrub starts
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: purged_snaps scrub ok
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: osdmap e20: 8 total, 3 up, 8 in
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3'
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T11:02:00.685 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2843080681' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:02:00.699 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":20,"num_osds":8,"num_up_osds":3,"osd_up_since":1773140518,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0}
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: purged_snaps scrub starts
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: purged_snaps scrub ok
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: osdmap e20: 8 total, 3 up, 8 in
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664]' entity='osd.3'
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2843080681' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 sudo[67314]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 sudo[67314]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 sudo[67314]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167)
2026-03-10T11:02:00.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:00 vm07 sudo[67314]: pam_unix(sudo:session): session closed for user root
2026-03-10T11:02:01.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 sudo[75543]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda
2026-03-10T11:02:01.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 sudo[75543]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
2026-03-10T11:02:01.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 sudo[75543]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167)
2026-03-10T11:02:01.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:00 vm06 sudo[75543]: pam_unix(sudo:session): session closed for user root
2026-03-10T11:02:01.700 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: purged_snaps scrub starts
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: purged_snaps scrub ok
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='osd.6 [v2:192.168.123.107:6824/3420820315,v1:192.168.123.107:6825/3420820315]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: pgmap v34: 1 pgs: 1 unknown; 0 B data, 299 MiB used, 60 GiB / 60 GiB avail
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='osd.4 ' entity='osd.4'
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664] boot
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: osd.4 [v2:192.168.123.107:6816/2858289592,v1:192.168.123.107:6817/2858289592] boot
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: osdmap e21: 8 total, 5 up, 8 in
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='osd.6 [v2:192.168.123.107:6824/3420820315,v1:192.168.123.107:6825/3420820315]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:01.810 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:01 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: purged_snaps scrub starts
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: purged_snaps scrub ok
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='osd.6 [v2:192.168.123.107:6824/3420820315,v1:192.168.123.107:6825/3420820315]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: pgmap v34: 1 pgs: 1 unknown; 0 B data, 299 MiB used, 60 GiB / 60 GiB avail
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='osd.4 ' entity='osd.4'
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: osd.3 [v2:192.168.123.106:6810/1585385664,v1:192.168.123.106:6811/1585385664] boot
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: osd.4 [v2:192.168.123.107:6816/2858289592,v1:192.168.123.107:6817/2858289592] boot
2026-03-10T11:02:01.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: osdmap e21: 8 total, 5 up, 8 in
2026-03-10T11:02:01.917 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-10T11:02:01.917 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T11:02:01.917 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T11:02:01.917 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='osd.6 [v2:192.168.123.107:6824/3420820315,v1:192.168.123.107:6825/3420820315]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch
2026-03-10T11:02:01.917 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T11:02:01.917 
INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T11:02:01.917 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:01.917 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T11:02:01.917 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:01.917 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:01 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:01.942 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:02.288 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:02.510 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":22,"num_osds":8,"num_up_osds":5,"osd_up_since":1773140521,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0} 2026-03-10T11:02:02.915 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:02 vm07 ceph-mon[56438]: mgrmap e19: vm06.luxohm(active, since 52s), standbys: vm07.rgmael 2026-03-10T11:02:02.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:02 vm07 ceph-mon[56438]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-10T11:02:02.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:02 vm07 ceph-mon[56438]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-10T11:02:02.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:02 vm07 ceph-mon[56438]: osdmap e22: 8 total, 5 up, 8 in 2026-03-10T11:02:02.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:02 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T11:02:02.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:02 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T11:02:02.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:02 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:02.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:02 vm07 ceph-mon[56438]: from='client.? 
192.168.123.106:0/1981304137' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T11:02:03.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:02 vm06 ceph-mon[49534]: mgrmap e19: vm06.luxohm(active, since 52s), standbys: vm07.rgmael 2026-03-10T11:02:03.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:02 vm06 ceph-mon[49534]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-10T11:02:03.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:02 vm06 ceph-mon[49534]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-10T11:02:03.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:02 vm06 ceph-mon[49534]: osdmap e22: 8 total, 5 up, 8 in 2026-03-10T11:02:03.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:02 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T11:02:03.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:02 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T11:02:03.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:02 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:03.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:02 vm06 ceph-mon[49534]: from='client.? 
192.168.123.106:0/1981304137' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T11:02:03.511 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json 2026-03-10T11:02:03.760 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: purged_snaps scrub starts 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: purged_snaps scrub ok 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: pgmap v37: 1 pgs: 1 unknown; 0 B data, 532 MiB used, 99 GiB / 100 GiB avail 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: osd.6 [v2:192.168.123.107:6824/3420820315,v1:192.168.123.107:6825/3420820315] boot 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693] boot 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: osdmap e23: 8 total, 7 up, 8 in 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T11:02:03.761 
INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:03.761 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:03 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:03.763 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:03.968 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: purged_snaps scrub starts 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: purged_snaps scrub ok 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: pgmap v37: 1 pgs: 1 unknown; 0 B data, 532 MiB used, 99 GiB / 100 GiB avail 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693]' entity='osd.5' 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: osd.6 [v2:192.168.123.107:6824/3420820315,v1:192.168.123.107:6825/3420820315] boot 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: osd.5 [v2:192.168.123.106:6818/1397229693,v1:192.168.123.106:6819/1397229693] boot 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: osdmap e23: 8 total, 7 up, 8 in 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", 
"id": 5}]: dispatch 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:03.969 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:03 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:04.048 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:04.212 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":23,"num_osds":8,"num_up_osds":7,"osd_up_since":1773140523,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":0} 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: purged_snaps scrub starts 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: purged_snaps scrub ok 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3422052260' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: from='osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: osdmap e24: 8 total, 7 up, 8 in 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:04.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:04 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config rm", "who": "osd/host:vm07", "name": "osd_memory_target"}]: dispatch 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 
ceph-mon[49534]: purged_snaps scrub starts 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: purged_snaps scrub ok 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3422052260' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: from='osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: osdmap e24: 8 total, 7 up, 8 in 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:05.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:04 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config rm", "who": "osd/host:vm07", "name": "osd_memory_target"}]: dispatch 2026-03-10T11:02:05.213 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd stat -f json 2026-03-10T11:02:05.418 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:05.651 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: Detected new or changed devices on vm07 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 685 MiB used, 139 GiB / 140 GiB avail 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:05.754 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739] boot 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: osdmap e25: 8 total, 8 up, 8 in 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:05.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:05 vm06 ceph-mon[49534]: from='client.? 
192.168.123.106:0/454797160' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T11:02:05.813 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":25,"num_osds":8,"num_up_osds":8,"osd_up_since":1773140525,"num_in_osds":8,"osd_in_since":1773140506,"num_remapped_pgs":1} 2026-03-10T11:02:05.814 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd dump --format=json 2026-03-10T11:02:05.972 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: Detected new or changed devices on vm07 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 685 MiB used, 139 GiB / 140 GiB avail 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: osd.7 [v2:192.168.123.106:6826/1003970739,v1:192.168.123.106:6827/1003970739] boot 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: osdmap e25: 8 total, 8 up, 8 in 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T11:02:06.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:05 vm07 ceph-mon[56438]: from='client.? 
192.168.123.106:0/454797160' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T11:02:06.190 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:06.191 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":26,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","created":"2026-03-10T11:00:13.775092+0000","modified":"2026-03-10T11:02:05.870377+0000","last_up_change":"2026-03-10T11:02:05.252126+0000","last_in_change":"2026-03-10T11:01:46.084412+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":13,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T11:01:58.954193+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"21","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"9b76adb1-8a8b-4cad-8b2b-f9ead69e56db","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":25,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6800","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6801","nonce":2148012055}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6802","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6803","nonce":2148012055}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6806","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6807","nonce":2148012055}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6804","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6805","nonce":2148012055}]},"public_addr":"192.168.123.107:6801/2148012055","cluster_addr":"192.168.123.107:6803/2148012055","heartbeat_back_addr":"192.168.123.107:6807/2148012055","heartbeat_front_addr":"192.168.123.107:6805/2148012055","state":["exists","up"]},{"osd":1,"uuid":"6dd37bb9-b004-4153-bb97-565b64a9f2d7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":21,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6803","nonce":1755791427}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6805","nonce":1755791427}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6809","nonce":1755791427}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6807","nonce":1755791427}]},"public_addr":"192.168.123.106:6803/1755791427","cluster_addr":"192.168.123.106:6805/1755791427","heartbeat_back_addr":"192.168.123.106:6809/1755791427","heartbeat_front_addr":"192.168.123.106:6807/1755791427","state":["exists","up"]},{"osd":2,"uuid":"6f8f1c30-5206-4f4f-9a3e-10f967c47c15","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6808","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6809","nonce":2086159138}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6810","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6811","nonce":2086159138}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6814","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6815","nonce":2086159138}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6812","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6813","nonce":2086159138}]},"public_addr":"192.168.123.107:6809/2086159138","cluster_addr":"192.168.123.107:6811/2086159138","heartbeat_back_addr":"192.168.123.107:6815/2086159138","heartbeat_front_addr":"192.168.123.107:6813/2086159138","state":["exists","up"]},{"osd":3,"uuid":"794f92ac-9222-4621-99c2-6fec07b073f7","up":1,"in":1,"weight":1,"primary_affinit
y":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6811","nonce":1585385664}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6813","nonce":1585385664}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6817","nonce":1585385664}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6815","nonce":1585385664}]},"public_addr":"192.168.123.106:6811/1585385664","cluster_addr":"192.168.123.106:6813/1585385664","heartbeat_back_addr":"192.168.123.106:6817/1585385664","heartbeat_front_addr":"192.168.123.106:6815/1585385664","state":["exists","up"]},{"osd":4,"uuid":"99c67954-641c-498f-8710-8bc301498be2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6816","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6817","nonce":2858289592}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6818","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6819","nonce":2858289592}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6822","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6823","nonce":2858289592}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6820","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6821","nonce":2858289592}]},"public_addr":"192.168.123.107:6817/2858289592","cluster_addr":"192.168.123.107:6819/2858289592","heartbeat_back_addr":"192.168.123.107:6823/2858289592","heartbeat_front_addr":"192.168.123.107:6821/2858289592","state":["exists","up"]},{"osd":5,"uuid":"bcfcb47d-2086-4ffb-a7c2-bb986dc3cb6b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6819","nonce":1397229693}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6821","nonce":1397229693}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6825","nonce":1397229693}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6823","nonce":1397229693}]},"public_addr":"192.168.123.106:6819/1397229693","cluster_addr":"192.168.123.106:6821/1397229693","heartbeat_back_addr":"192.168.123.106:6825/1397229693","heartbeat_front_addr":"192.168.123.106:6823/1397229693","state":["exists","up"]},{"osd":6,"uuid":"fe73d692-18c6-496a-8ee3-d10ebe2e97c5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6824","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6825","nonce":3420820315}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6826","nonce":3420820315},{"type":"v1","addr":"192.1
68.123.107:6827","nonce":3420820315}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6830","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6831","nonce":3420820315}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6828","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6829","nonce":3420820315}]},"public_addr":"192.168.123.107:6825/3420820315","cluster_addr":"192.168.123.107:6827/3420820315","heartbeat_back_addr":"192.168.123.107:6831/3420820315","heartbeat_front_addr":"192.168.123.107:6829/3420820315","state":["exists","up"]},{"osd":7,"uuid":"11b14356-8f96-4856-878c-cac368917846","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6827","nonce":1003970739}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6829","nonce":1003970739}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6832","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6833","nonce":1003970739}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6831","nonce":1003970739}]},"public_addr":"192.168.123.106:6827/1003970739","cluster_addr":"192.168.123.106:6829/1003970739","heartbeat_back_addr":"192.168.123.106:6833/1003970739","heartbeat_front_addr":"192.168.123.106:6831/1003970739","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:54.624282+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:55.336254+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:56.570624+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:58.546214+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:59.425561+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:02:01.433622+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:02:01.769844+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:02:03.819876+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.106:6801/3060625394":"2026-03-11T11:01:08.907961+0000","192.168.123.106:0/3166855479":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/1688144993":"2026-03-11T11:00:38.722795+0000","192.168
.123.106:0/2264771765":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/1742174895":"2026-03-11T11:00:38.722795+0000","192.168.123.106:6800/2836985270":"2026-03-11T11:00:38.722795+0000","192.168.123.106:6801/1615358920":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/297866357":"2026-03-11T11:01:08.907961+0000","192.168.123.106:0/1970771380":"2026-03-11T11:00:38.722795+0000","192.168.123.106:6800/3060625394":"2026-03-11T11:01:08.907961+0000","192.168.123.106:6800/1615358920":"2026-03-11T11:00:25.063686+0000","192.168.123.106:6801/2836985270":"2026-03-11T11:00:38.722795+0000","192.168.123.106:0/3240089081":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/1846626197":"2026-03-11T11:01:08.907961+0000","192.168.123.106:0/2357515870":"2026-03-11T11:01:08.907961+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T11:02:06.336 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-10T11:01:58.954193+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '21', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 7.889999866485596, 'score_stable': 7.889999866485596, 'optimal_score': 0.3799999952316284, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-03-10T11:02:06.336 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd pool get .mgr pg_num 
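[Annotation] Each of the `cephadm shell ... --format=json` invocations in this run (`ceph osd stat`, `ceph osd dump`, and the `ceph osd pool get .mgr pg_num` issued just above, whose output follows below) uses the same run-and-parse polling pattern. A minimal local sketch of that loop, assuming direct access to the node; `cephadm_json` is a hypothetical helper, the image and fsid are copied from this log, and teuthology itself drives these commands over SSH via `orchestra.run` rather than locally:

```python
import json
import subprocess
import time

# Values copied from this log; adjust for another cluster.
IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"
FSID = "2d4d1532-1c70-11f1-9ee5-8d2ac270c240"

def cephadm_json(*ceph_args):
    """Run a ceph command inside `cephadm shell` and parse its JSON stdout."""
    cmd = [
        "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
        "shell", "--fsid", FSID, "--",
        "ceph", *ceph_args, "--format=json",
    ]
    out = subprocess.check_output(cmd, text=True)
    # cephadm writes "Inferring config ..." to stderr and may emit a blank
    # line on stdout before the JSON payload, hence the strip().
    return json.loads(out.strip())

# Poll `ceph osd stat` until every OSD is up, mirroring the repeated
# osd stat calls visible in the log above (num_up_osds: 5 -> 7 -> 8).
while True:
    stat = cephadm_json("osd", "stat")
    if stat["num_up_osds"] == stat["num_osds"]:
        break
    time.sleep(1)
```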
2026-03-10T11:02:06.501 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:06.737 INFO:teuthology.orchestra.run.vm06.stdout:pg_num: 1
2026-03-10T11:02:06.881 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:06 vm06 ceph-mon[49534]: purged_snaps scrub starts
2026-03-10T11:02:06.881 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:06 vm06 ceph-mon[49534]: purged_snaps scrub ok
2026-03-10T11:02:06.881 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:06 vm06 ceph-mon[49534]: Detected new or changed devices on vm06
2026-03-10T11:02:06.881 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:06 vm06 ceph-mon[49534]: osdmap e26: 8 total, 8 up, 8 in
2026-03-10T11:02:06.881 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:06 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1418024839' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-10T11:02:06.912 INFO:tasks.cephadm:Setting up client nodes...
2026-03-10T11:02:06.912 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-10T11:02:07.088 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:07.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:06 vm07 ceph-mon[56438]: purged_snaps scrub starts
2026-03-10T11:02:07.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:06 vm07 ceph-mon[56438]: purged_snaps scrub ok
2026-03-10T11:02:07.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:06 vm07 ceph-mon[56438]: Detected new or changed devices on vm06
2026-03-10T11:02:07.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:06 vm07 ceph-mon[56438]: osdmap e26: 8 total, 8 up, 8 in
2026-03-10T11:02:07.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:06 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1418024839' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-10T11:02:07.356 INFO:teuthology.orchestra.run.vm06.stdout:[client.0]
2026-03-10T11:02:07.356 INFO:teuthology.orchestra.run.vm06.stdout: key = AQAv+q9ppIDiFBAAZ7rXxij9VMlSYgmoMPSwzw==
2026-03-10T11:02:07.504 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-10T11:02:07.504 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.0.keyring
2026-03-10T11:02:07.504 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring
2026-03-10T11:02:07.534 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-10T11:02:07.699 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm07/config
2026-03-10T11:02:07.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:07 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2606277477' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch
2026-03-10T11:02:07.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:07 vm06 ceph-mon[49534]: osdmap e27: 8 total, 8 up, 8 in
2026-03-10T11:02:07.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:07 vm06 ceph-mon[49534]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 712 MiB used, 159 GiB / 160 GiB avail
2026-03-10T11:02:07.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:07 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/4001066974' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T11:02:07.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:07 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/4001066974' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T11:02:07.814 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:07 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2606277477' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch
2026-03-10T11:02:07.814 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:07 vm07 ceph-mon[56438]: osdmap e27: 8 total, 8 up, 8 in
2026-03-10T11:02:07.814 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:07 vm07 ceph-mon[56438]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 712 MiB used, 159 GiB / 160 GiB avail
2026-03-10T11:02:07.814 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:07 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/4001066974' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T11:02:07.814 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:07 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/4001066974' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T11:02:07.965 INFO:teuthology.orchestra.run.vm07.stdout:[client.1]
2026-03-10T11:02:07.965 INFO:teuthology.orchestra.run.vm07.stdout: key = AQAv+q9pl1shORAAAIhXoetpgVi4vHR1jdcScA==
2026-03-10T11:02:08.116 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T11:02:08.116 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/ceph/ceph.client.1.keyring
2026-03-10T11:02:08.116 DEBUG:teuthology.orchestra.run.vm07:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring
2026-03-10T11:02:08.152 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean...
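[Annotation] The client-setup step just logged is a three-part pattern: mint a key with `ceph auth get-or-create client.N`, stream the returned keyring into `/etc/ceph/ceph.client.N.keyring` via `sudo dd`, then `chmod 0644` it. A rough standalone equivalent, assuming a node where the `ceph` CLI can already reach this cluster (the harness wraps the same ceph command in `cephadm shell` and runs all three steps over SSH):

```python
import subprocess

def setup_client(client_id: int) -> None:
    """Sketch of the client-setup step above for client.<client_id>."""
    # Capabilities match the log: full mon/osd/mds/mgr access.
    caps = ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]
    keyring = subprocess.check_output(
        ["sudo", "ceph", "auth", "get-or-create", f"client.{client_id}", *caps],
        text=True,
    )
    path = f"/etc/ceph/ceph.client.{client_id}.keyring"
    # Equivalent of the logged `sudo dd of=...` with the keyring on stdin.
    subprocess.run(["sudo", "dd", f"of={path}"], input=keyring, text=True, check=True)
    # World-readable, as in the log, so unprivileged test clients can mount.
    subprocess.run(["sudo", "chmod", "0644", path], check=True)

setup_client(0)
setup_client(1)
```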
2026-03-10T11:02:08.152 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-10T11:02:08.152 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mgr dump --format=json 2026-03-10T11:02:08.313 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:08.564 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:08.735 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":19,"flags":0,"active_gid":14217,"active_name":"vm06.luxohm","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":371689821},{"type":"v1","addr":"192.168.123.106:6801","nonce":371689821}]},"active_addr":"192.168.123.106:6801/371689821","active_change":"2026-03-10T11:01:08.908222+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[{"gid":14240,"name":"vm07.rgmael","mgr_features":4540701547738038271,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope 
sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = 
Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this option can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger 
collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent crashes","long_desc":"","tags":[],"see_also":[]}}},
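The cephadm and crash entries above follow the standard ceph-mgr module option schema (name, type, level, flags, default_value, min/max, enum_allowed, desc). Any option in this dump can be inspected or overridden at runtime through the generic ceph config interface; a minimal sketch, with option names taken from the dump above and values chosen purely for illustration:

    # read the effective value of a cephadm module option
    ceph config get mgr mgr/cephadm/warn_on_stray_daemons
    # override it; mgr module options are bound to the mgr section
    ceph config set mgr mgr/cephadm/warn_on_stray_daemons false
    # drop the override and fall back to the default_value shown above
    ceph config rm mgr mgr/cephadm/warn_on_stray_daemons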
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},
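influx is the only module in this dump reporting can_run false; its error_string names the missing Python dependency. Assuming this blob is the disabled_modules array of ceph mgr module ls --format=json (which its shape matches), unavailable modules and their reasons can be pulled out with jq; a sketch:

    ceph mgr module ls --format=json | \
      jq -r '.disabled_modules[] | select(.can_run | not) | "\(.name): \(.error_string)"'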
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.106:8443/","prometheus":"http://192.168.123.106:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":5,"active_clients":[{"name":"devicehealth","addrvec":[{"type":"v2","addr":"192.168.123.106:0","nonce":3061232822}]},{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.106:0","nonce":4136320079}]},{"na
me":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.106:0","nonce":266517595}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.106:0","nonce":3069966193}]}]}
2026-03-10T11:02:08.737 INFO:tasks.cephadm.ceph_manager.ceph:mgr available!
2026-03-10T11:02:08.737 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up
2026-03-10T11:02:08.737 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd dump --format=json
2026-03-10T11:02:08.889 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:08 vm07 ceph-mon[56438]: from='client.? 192.168.123.107:0/3128076881' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T11:02:08.889 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:08 vm07 ceph-mon[56438]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T11:02:08.889 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:08 vm07 ceph-mon[56438]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T11:02:08.889 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:08 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2696937394' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch
2026-03-10T11:02:08.898 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:08.970 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:08 vm06 ceph-mon[49534]: from='client.? 192.168.123.107:0/3128076881' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T11:02:08.970 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:08 vm06 ceph-mon[49534]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T11:02:08.970 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:08 vm06 ceph-mon[49534]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T11:02:08.970 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:08 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2696937394' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch
2026-03-10T11:02:09.127 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:09.127 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":27,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","created":"2026-03-10T11:00:13.775092+0000","modified":"2026-03-10T11:02:06.873476+0000","last_up_change":"2026-03-10T11:02:05.252126+0000","last_in_change":"2026-03-10T11:01:46.084412+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":13,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T11:01:58.954193+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"21","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"9b76adb1-8a8b-4cad-8b2b-f9ead69e56db","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":25,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6800","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6801","nonce":2148012055}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6802","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6803","nonce":2148012055}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6806","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6807","nonce":2148012055}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6804","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6805","nonce":2148012055}]},"public_addr":"192.168.123.107:6801/2148012055","cluster_addr":"192.168.123.107:6803/2148012055","heartbeat_back_addr":"192.168.123.107:6807/2148012055","heartbeat_front_addr":"192.168.123.107:6805/2148012055","state":["exists","up"]},{"osd":1,"uuid":"6dd37bb9-b004-4153-bb97-565b64a9f2d7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":21,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6803","nonce":1755791427}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6805","nonce":1755791427}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6809","nonce":1755791427}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6807","nonce":1755791427}]},"public_addr":"192.168.123.106:6803/1755791427","cluster_addr":"192.168.123.106:6805/1755791427","heartbeat_back_addr":"192.168.123.106:6809/1755791427","heartbeat_front_addr":"192.168.123.106:6807/1755791427","state":["exists","up"]},{"osd":2,"uuid":"6f8f1c30-5206-4f4f-9a3e-10f967c47c15","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6808","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6809","nonce":2086159138}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6810","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6811","nonce":2086159138}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6814","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6815","nonce":2086159138}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6812","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6813","nonce":2086159138}]},"public_addr":"192.168.123.107:6809/2086159138","cluster_addr":"192.168.123.107:6811/2086159138","heartbeat_back_addr":"192.168.123.107:6815/2086159138","heartbeat_front_addr":"192.168.123.107:6813/2086159138","state":["exists","up"]},{"osd":3,"uuid":"794f92ac-9222-4621-99c2-6fec07b073f7","up":1,"in":1,"weight":1,"primary_affinit
y":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6811","nonce":1585385664}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6813","nonce":1585385664}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6817","nonce":1585385664}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6815","nonce":1585385664}]},"public_addr":"192.168.123.106:6811/1585385664","cluster_addr":"192.168.123.106:6813/1585385664","heartbeat_back_addr":"192.168.123.106:6817/1585385664","heartbeat_front_addr":"192.168.123.106:6815/1585385664","state":["exists","up"]},{"osd":4,"uuid":"99c67954-641c-498f-8710-8bc301498be2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6816","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6817","nonce":2858289592}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6818","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6819","nonce":2858289592}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6822","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6823","nonce":2858289592}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6820","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6821","nonce":2858289592}]},"public_addr":"192.168.123.107:6817/2858289592","cluster_addr":"192.168.123.107:6819/2858289592","heartbeat_back_addr":"192.168.123.107:6823/2858289592","heartbeat_front_addr":"192.168.123.107:6821/2858289592","state":["exists","up"]},{"osd":5,"uuid":"bcfcb47d-2086-4ffb-a7c2-bb986dc3cb6b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6819","nonce":1397229693}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6821","nonce":1397229693}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6825","nonce":1397229693}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6823","nonce":1397229693}]},"public_addr":"192.168.123.106:6819/1397229693","cluster_addr":"192.168.123.106:6821/1397229693","heartbeat_back_addr":"192.168.123.106:6825/1397229693","heartbeat_front_addr":"192.168.123.106:6823/1397229693","state":["exists","up"]},{"osd":6,"uuid":"fe73d692-18c6-496a-8ee3-d10ebe2e97c5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6824","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6825","nonce":3420820315}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6826","nonce":3420820315},{"type":"v1","addr":"192.1
68.123.107:6827","nonce":3420820315}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6830","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6831","nonce":3420820315}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6828","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6829","nonce":3420820315}]},"public_addr":"192.168.123.107:6825/3420820315","cluster_addr":"192.168.123.107:6827/3420820315","heartbeat_back_addr":"192.168.123.107:6831/3420820315","heartbeat_front_addr":"192.168.123.107:6829/3420820315","state":["exists","up"]},{"osd":7,"uuid":"11b14356-8f96-4856-878c-cac368917846","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6827","nonce":1003970739}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6829","nonce":1003970739}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6832","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6833","nonce":1003970739}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6831","nonce":1003970739}]},"public_addr":"192.168.123.106:6827/1003970739","cluster_addr":"192.168.123.106:6829/1003970739","heartbeat_back_addr":"192.168.123.106:6833/1003970739","heartbeat_front_addr":"192.168.123.106:6831/1003970739","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:54.624282+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:55.336254+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:56.570624+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:58.546214+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:59.425561+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:02:01.433622+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:02:01.769844+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:02:03.819876+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.106:6801/3060625394":"2026-03-11T11:01:08.907961+0000","192.168.123.106:0/3166855479":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/1688144993":"2026-03-11T11:00:38.722795+0000","192.16
8.123.106:0/2264771765":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/1742174895":"2026-03-11T11:00:38.722795+0000","192.168.123.106:6800/2836985270":"2026-03-11T11:00:38.722795+0000","192.168.123.106:6801/1615358920":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/297866357":"2026-03-11T11:01:08.907961+0000","192.168.123.106:0/1970771380":"2026-03-11T11:00:38.722795+0000","192.168.123.106:6800/3060625394":"2026-03-11T11:01:08.907961+0000","192.168.123.106:6800/1615358920":"2026-03-11T11:00:25.063686+0000","192.168.123.106:6801/2836985270":"2026-03-11T11:00:38.722795+0000","192.168.123.106:0/3240089081":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/1846626197":"2026-03-11T11:01:08.907961+0000","192.168.123.106:0/2357515870":"2026-03-11T11:01:08.907961+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}}
2026-03-10T11:02:09.293 INFO:tasks.cephadm.ceph_manager.ceph:all up!
2026-03-10T11:02:09.293 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd dump --format=json
2026-03-10T11:02:09.462 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:09.673 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:09.673 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":27,"fsid":"2d4d1532-1c70-11f1-9ee5-8d2ac270c240","created":"2026-03-10T11:00:13.775092+0000","modified":"2026-03-10T11:02:06.873476+0000","last_up_change":"2026-03-10T11:02:05.252126+0000","last_in_change":"2026-03-10T11:01:46.084412+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":13,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T11:01:58.954193+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"21","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target
_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"9b76adb1-8a8b-4cad-8b2b-f9ead69e56db","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":25,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6800","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6801","nonce":2148012055}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6802","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6803","nonce":2148012055}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6806","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6807","nonce":2148012055}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6804","nonce":2148012055},{"type":"v1","addr":"192.168.123.107:6805","nonce":2148012055}]},"public_addr":"192.168.123.107:6801/2148012055","cluster_addr":"192.168.123.107:6803/2148012055","heartbeat_back_addr":"192.168.123.107:6807/2148012055","heartbeat_front_addr":"192.168.123.107:6805/2148012055","state":["exists","up"]},{"osd":1,"uuid":"6dd37bb9-b004-4153-bb97-565b64a9f2d7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":21,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6803","nonce":1755791427}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6805","nonce":1755791427}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6809","nonce":1755791427}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1755791427},{"type":"v1","addr":"192.168.123.106:6807","nonce":1755791427}]},"public_addr":"192.168.123.106:6803/1755791427","cluster_addr":"192.168.123.106:6805/1755791427","heartbeat_back_addr":"192.168.123.106:6809/1755791427","heartbeat_front_addr":"192.168.123.106:6807/1755791427","state":["exists","up"]},{"osd":2,"uuid":"6f8f1c30-5206-4f4f-9a3e-10f967c47c15","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6808","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6809","nonce":2086159138}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6810","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6811","nonce":2086159138}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.10
7:6814","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6815","nonce":2086159138}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6812","nonce":2086159138},{"type":"v1","addr":"192.168.123.107:6813","nonce":2086159138}]},"public_addr":"192.168.123.107:6809/2086159138","cluster_addr":"192.168.123.107:6811/2086159138","heartbeat_back_addr":"192.168.123.107:6815/2086159138","heartbeat_front_addr":"192.168.123.107:6813/2086159138","state":["exists","up"]},{"osd":3,"uuid":"794f92ac-9222-4621-99c2-6fec07b073f7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6811","nonce":1585385664}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6813","nonce":1585385664}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6817","nonce":1585385664}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":1585385664},{"type":"v1","addr":"192.168.123.106:6815","nonce":1585385664}]},"public_addr":"192.168.123.106:6811/1585385664","cluster_addr":"192.168.123.106:6813/1585385664","heartbeat_back_addr":"192.168.123.106:6817/1585385664","heartbeat_front_addr":"192.168.123.106:6815/1585385664","state":["exists","up"]},{"osd":4,"uuid":"99c67954-641c-498f-8710-8bc301498be2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6816","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6817","nonce":2858289592}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6818","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6819","nonce":2858289592}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6822","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6823","nonce":2858289592}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6820","nonce":2858289592},{"type":"v1","addr":"192.168.123.107:6821","nonce":2858289592}]},"public_addr":"192.168.123.107:6817/2858289592","cluster_addr":"192.168.123.107:6819/2858289592","heartbeat_back_addr":"192.168.123.107:6823/2858289592","heartbeat_front_addr":"192.168.123.107:6821/2858289592","state":["exists","up"]},{"osd":5,"uuid":"bcfcb47d-2086-4ffb-a7c2-bb986dc3cb6b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6819","nonce":1397229693}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6821","nonce":1397229693}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6825","nonce":1397229693}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":1397229693},{"type":"v1","addr":"192.168.123.106:6823","nonce":1397229693}]},"public_addr":"192.168.123.106:6819/1397229693","cluster_addr":"192.168.123.106:6821/1397229693","heartbeat_b
ack_addr":"192.168.123.106:6825/1397229693","heartbeat_front_addr":"192.168.123.106:6823/1397229693","state":["exists","up"]},{"osd":6,"uuid":"fe73d692-18c6-496a-8ee3-d10ebe2e97c5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6824","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6825","nonce":3420820315}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6826","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6827","nonce":3420820315}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6830","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6831","nonce":3420820315}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6828","nonce":3420820315},{"type":"v1","addr":"192.168.123.107:6829","nonce":3420820315}]},"public_addr":"192.168.123.107:6825/3420820315","cluster_addr":"192.168.123.107:6827/3420820315","heartbeat_back_addr":"192.168.123.107:6831/3420820315","heartbeat_front_addr":"192.168.123.107:6829/3420820315","state":["exists","up"]},{"osd":7,"uuid":"11b14356-8f96-4856-878c-cac368917846","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6827","nonce":1003970739}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6829","nonce":1003970739}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6832","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6833","nonce":1003970739}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":1003970739},{"type":"v1","addr":"192.168.123.106:6831","nonce":1003970739}]},"public_addr":"192.168.123.106:6827/1003970739","cluster_addr":"192.168.123.106:6829/1003970739","heartbeat_back_addr":"192.168.123.106:6833/1003970739","heartbeat_front_addr":"192.168.123.106:6831/1003970739","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:54.624282+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:55.336254+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:56.570624+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:58.546214+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:01:59.425561+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:02:01.433622+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_we
ight":0,"last_purged_snaps_scrub":"2026-03-10T11:02:01.769844+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T11:02:03.819876+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.106:6801/3060625394":"2026-03-11T11:01:08.907961+0000","192.168.123.106:0/3166855479":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/1688144993":"2026-03-11T11:00:38.722795+0000","192.168.123.106:0/2264771765":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/1742174895":"2026-03-11T11:00:38.722795+0000","192.168.123.106:6800/2836985270":"2026-03-11T11:00:38.722795+0000","192.168.123.106:6801/1615358920":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/297866357":"2026-03-11T11:01:08.907961+0000","192.168.123.106:0/1970771380":"2026-03-11T11:00:38.722795+0000","192.168.123.106:6800/3060625394":"2026-03-11T11:01:08.907961+0000","192.168.123.106:6800/1615358920":"2026-03-11T11:00:25.063686+0000","192.168.123.106:6801/2836985270":"2026-03-11T11:00:38.722795+0000","192.168.123.106:0/3240089081":"2026-03-11T11:00:25.063686+0000","192.168.123.106:0/1846626197":"2026-03-11T11:01:08.907961+0000","192.168.123.106:0/2357515870":"2026-03-11T11:01:08.907961+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}}
2026-03-10T11:02:09.843 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph tell osd.0 flush_pg_stats
2026-03-10T11:02:09.843 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph tell osd.1 flush_pg_stats
2026-03-10T11:02:09.843 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph tell osd.2 flush_pg_stats
2026-03-10T11:02:09.844 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph tell osd.3 flush_pg_stats
2026-03-10T11:02:09.844 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph tell osd.4 flush_pg_stats
2026-03-10T11:02:09.844 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph tell osd.5 flush_pg_stats
2026-03-10T11:02:09.844 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph tell osd.6 flush_pg_stats
2026-03-10T11:02:09.844 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph tell osd.7 flush_pg_stats
2026-03-10T11:02:10.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:09 vm07 ceph-mon[56438]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail
2026-03-10T11:02:10.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:09 vm07 ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:02:10.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:09 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3990836681' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-10T11:02:10.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:09 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1053258928' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-10T11:02:10.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:09 vm06 ceph-mon[49534]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail
2026-03-10T11:02:10.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:09 vm06 ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:02:10.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:09 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3990836681' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-10T11:02:10.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:09 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1053258928' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-10T11:02:10.498 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:10.567 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:10.705 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:10.707 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:10.708 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:10.981 INFO:teuthology.orchestra.run.vm06.stdout:98784247811
2026-03-10T11:02:10.981 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.6
2026-03-10T11:02:11.047 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:11.049 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:11.056 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:11.167 INFO:teuthology.orchestra.run.vm06.stdout:90194313219
2026-03-10T11:02:11.168 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.4
2026-03-10T11:02:11.443 INFO:teuthology.orchestra.run.vm06.stdout:90194313219
2026-03-10T11:02:11.444 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.3
2026-03-10T11:02:11.524 INFO:teuthology.orchestra.run.vm06.stdout:98784247811
2026-03-10T11:02:11.524 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.5
2026-03-10T11:02:11.551 INFO:teuthology.orchestra.run.vm06.stdout:68719476741
2026-03-10T11:02:11.551 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.0
2026-03-10T11:02:11.850 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:11.853 INFO:teuthology.orchestra.run.vm06.stdout:77309411332
2026-03-10T11:02:11.853 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.2
2026-03-10T11:02:11.862 INFO:teuthology.orchestra.run.vm06.stdout:77309411332
2026-03-10T11:02:11.863 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.1
2026-03-10T11:02:11.874 INFO:teuthology.orchestra.run.vm06.stdout:107374182403
2026-03-10T11:02:11.874 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.7
2026-03-10T11:02:11.905 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:12.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:11 vm06 ceph-mon[49534]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail
2026-03-10T11:02:12.271 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:12.299 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:12.349 INFO:teuthology.orchestra.run.vm06.stdout:90194313219
2026-03-10T11:02:12.377 INFO:teuthology.orchestra.run.vm06.stdout:98784247811
2026-03-10T11:02:12.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:11 vm07 ceph-mon[56438]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail
2026-03-10T11:02:12.610 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313219 got 90194313219 for osd.4
2026-03-10T11:02:12.610 DEBUG:teuthology.parallel:result is None
2026-03-10T11:02:12.686 INFO:tasks.cephadm.ceph_manager.ceph:need seq 98784247811 got 98784247811 for osd.6
2026-03-10T11:02:12.686 DEBUG:teuthology.parallel:result is None
2026-03-10T11:02:12.703 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:12.880 INFO:teuthology.orchestra.run.vm06.stdout:90194313218
2026-03-10T11:02:12.911 INFO:teuthology.orchestra.run.vm06.stdout:68719476739
2026-03-10T11:02:12.927 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:12.936 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:12.937 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:13.115 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:12 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1077293133' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch
2026-03-10T11:02:13.115 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:12 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3132340157' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch
2026-03-10T11:02:13.115 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:12 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/2033461096' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch
2026-03-10T11:02:13.115 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:12 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/4278094605' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch
2026-03-10T11:02:13.121 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476741 got 68719476739 for osd.0
2026-03-10T11:02:13.129 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313219 got 90194313218 for osd.3
2026-03-10T11:02:13.336 INFO:teuthology.orchestra.run.vm06.stdout:98784247811
2026-03-10T11:02:13.382 INFO:teuthology.orchestra.run.vm06.stdout:107374182403
2026-03-10T11:02:13.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:12 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1077293133' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch
2026-03-10T11:02:13.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:12 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3132340157' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch
2026-03-10T11:02:13.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:12 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/2033461096' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch
2026-03-10T11:02:13.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:12 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/4278094605' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch
2026-03-10T11:02:13.438 INFO:teuthology.orchestra.run.vm06.stdout:77309411332
2026-03-10T11:02:13.451 INFO:teuthology.orchestra.run.vm06.stdout:77309411332
2026-03-10T11:02:13.548 INFO:tasks.cephadm.ceph_manager.ceph:need seq 98784247811 got 98784247811 for osd.5
2026-03-10T11:02:13.549 DEBUG:teuthology.parallel:result is None
2026-03-10T11:02:13.549 INFO:tasks.cephadm.ceph_manager.ceph:need seq 107374182403 got 107374182403 for osd.7
2026-03-10T11:02:13.550 DEBUG:teuthology.parallel:result is None
2026-03-10T11:02:13.611 INFO:tasks.cephadm.ceph_manager.ceph:need seq 77309411332 got 77309411332 for osd.1
2026-03-10T11:02:13.611 DEBUG:teuthology.parallel:result is None
2026-03-10T11:02:13.612 INFO:tasks.cephadm.ceph_manager.ceph:need seq 77309411332 got 77309411332 for osd.2
2026-03-10T11:02:13.612 DEBUG:teuthology.parallel:result is None
2026-03-10T11:02:14.122 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.0
2026-03-10T11:02:14.130 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph osd last-stat-seq osd.3
2026-03-10T11:02:14.142 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:13 vm06 ceph-mon[49534]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail
2026-03-10T11:02:14.142 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:13 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/176997616' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch
2026-03-10T11:02:14.142 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:13 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1266175957' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch
2026-03-10T11:02:14.142 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:13 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3992046752' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch
2026-03-10T11:02:14.142 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:13 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/3146640129' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch
2026-03-10T11:02:14.298 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:14.338 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:14.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:13 vm07 ceph-mon[56438]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail
2026-03-10T11:02:14.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:13 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/176997616' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch
2026-03-10T11:02:14.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:13 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1266175957' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch
2026-03-10T11:02:14.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:13 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3992046752' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch
2026-03-10T11:02:14.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:13 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/3146640129' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch
2026-03-10T11:02:14.562 INFO:teuthology.orchestra.run.vm06.stdout:68719476741
2026-03-10T11:02:14.612 INFO:teuthology.orchestra.run.vm06.stdout:90194313220
2026-03-10T11:02:14.713 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476741 got 68719476741 for osd.0
2026-03-10T11:02:14.713 DEBUG:teuthology.parallel:result is None
2026-03-10T11:02:14.761 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313219 got 90194313220 for osd.3
2026-03-10T11:02:14.761 DEBUG:teuthology.parallel:result is None
2026-03-10T11:02:14.761 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean
2026-03-10T11:02:14.761 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph pg dump --format=json
2026-03-10T11:02:14.922 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:15.146 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:15.146 INFO:teuthology.orchestra.run.vm06.stderr:dumped all
2026-03-10T11:02:15.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:14 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/4188398971' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch
2026-03-10T11:02:15.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:14 vm06 ceph-mon[49534]: from='client.? 192.168.123.106:0/1205689024' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch
2026-03-10T11:02:15.296 INFO:teuthology.orchestra.run.vm06.stdout:{"pg_ready":true,"pg_map":{"version":48,"stamp":"2026-03-10T11:02:14.925048+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":218092,"kb_used_data":3372,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167521300,"statfs":{"total":171765137408,"available":171541811200,"internally_reserved":0,"allocated":3452928,"data_stored":2176440,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12712,"internal_metadata":219663960},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"9.051028"},"pg_stats":[{"pgid":"1.0","version":"20'32","reported_seq":17,"reported_epoch":27,"s
tate":"active+clean","last_fresh":"2026-03-10T11:02:07.194341+0000","last_change":"2026-03-10T11:02:07.193264+0000","last_active":"2026-03-10T11:02:07.194341+0000","last_peered":"2026-03-10T11:02:07.194341+0000","last_clean":"2026-03-10T11:02:07.194341+0000","last_became_active":"2026-03-10T11:02:06.885892+0000","last_became_peered":"2026-03-10T11:02:06.885892+0000","last_unstale":"2026-03-10T11:02:07.194341+0000","last_undegraded":"2026-03-10T11:02:07.194341+0000","last_fullsized":"2026-03-10T11:02:07.194341+0000","mapping_epoch":26,"log_start":"0'0","ondisk_log_start":"0'0","created":19,"last_epoch_clean":27,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T11:01:59.109375+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T11:01:59.109375+0000","last_clean_scrub_stamp":"2026-03-10T11:01:59.109375+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-11T22:25:49.301952+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,3],"acting":[7,0,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1851392,"data_stored":1837120,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"on
disk_log_size":32,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up_from":25,"seq":107374182403,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27544,"kb_used_data":704,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939880,"statfs":{"total":21470642176,"available":21442437120,"internally_reserved":0,"allocated":720896,"data_stored":559105,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":23,"seq":98784247812,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":258048,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":23,"seq":98784247812,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":258048,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":21,"seq":90194313220,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27096,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940328,"statfs":{"total":21470642176,"available":21442895872,"internally_reserved":0,"allocated":258048,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":21,"seq":90194313220,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27540,"kb_used_data":704,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939884,"statfs":{"total":21470642176,"available":21442441216,"internally_reserved":0,"allocated":720896,"data_stored":559105,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards
_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":18,"seq":77309411333,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27096,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940328,"statfs":{"total":21470642176,"available":21442895872,"internally_reserved":0,"allocated":258048,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":18,"seq":77309411333,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27088,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940336,"statfs":{"total":21470642176,"available":21442904064,"internally_reserved":0,"allocated":258048,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27544,"kb_used_data":704,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939880,"statfs":{"total":21470642176,"available":21442437120,"internally_reserved":0,"allocated":720896,"data_stored":559105,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T11:02:15.297 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph pg dump 
--format=json 2026-03-10T11:02:15.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:14 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/4188398971' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T11:02:15.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:14 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/1205689024' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T11:02:15.469 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:15.689 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:15.689 INFO:teuthology.orchestra.run.vm06.stderr:dumped all 2026-03-10T11:02:15.835 INFO:teuthology.orchestra.run.vm06.stdout:{"pg_ready":true,"pg_map":{"version":48,"stamp":"2026-03-10T11:02:14.925048+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":218092,"kb_used_data":3372,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167521300,"statfs":{"total":171765137408,"available":171541811200,"internally_reserved":0,"allocated":3452928,"data_stored":2176440,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12712,"internal_metadata":219663960},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote"
:0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"9.051028"},"pg_stats":[{"pgid":"1.0","version":"20'32","reported_seq":17,"reported_epoch":27,"state":"active+clean","last_fresh":"2026-03-10T11:02:07.194341+0000","last_change":"2026-03-10T11:02:07.193264+0000","last_active":"2026-03-10T11:02:07.194341+0000","last_peered":"2026-03-10T11:02:07.194341+0000","last_clean":"2026-03-10T11:02:07.194341+0000","last_became_active":"2026-03-10T11:02:06.885892+0000","last_became_peered":"2026-03-10T11:02:06.885892+0000","last_unstale":"2026-03-10T11:02:07.194341+0000","last_undegraded":"2026-03-10T11:02:07.194341+0000","last_fullsized":"2026-03-10T11:02:07.194341+0000","mapping_epoch":26,"log_start":"0'0","ondisk_log_start":"0'0","created":19,"last_epoch_clean":27,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T11:01:59.109375+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T11:01:59.109375+0000","last_clean_scrub_stamp":"2026-03-10T11:01:59.109375+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-03-11T22:25:49.301952+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,3],"acting":[7,0,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1851392,"data_stored":1837120,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up_from":25,"seq":107374182403,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27544,"kb_used_data":704,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939880,"statfs":{"total":21470642176,"available":21442437120,"internally_reserved":0,"allocated":720896,"data_stored":559105,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":23,"seq":98784247812,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":258048,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap
_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":23,"seq":98784247812,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":258048,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":21,"seq":90194313220,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27096,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940328,"statfs":{"total":21470642176,"available":21442895872,"internally_reserved":0,"allocated":258048,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":21,"seq":90194313220,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27540,"kb_used_data":704,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939884,"statfs":{"total":21470642176,"available":21442441216,"internally_reserved":0,"allocated":720896,"data_stored":559105,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":18,"seq":77309411333,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27096,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940328,"statfs":{"total":21470642176,"available":21442895872,"internally_reserved":0,"allocated":258048,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":18,"seq":77309411333,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27088,"kb_used_data":252,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940336,"statfs":{"total":21470642176,"available":21442904064,"internally_reserved":0,"allocated":25804
8,"data_stored":99825,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27544,"kb_used_data":704,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939880,"statfs":{"total":21470642176,"available":21442437120,"internally_reserved":0,"allocated":720896,"data_stored":559105,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T11:02:15.836 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-10T11:02:15.836 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-10T11:02:15.836 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-10T11:02:15.836 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph health --format=json 2026-03-10T11:02:15.994 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:16.066 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:15 vm06 ceph-mon[49534]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:16.228 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:16.229 INFO:teuthology.orchestra.run.vm06.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-10T11:02:16.396 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-10T11:02:16.396 INFO:tasks.cephadm:Setup complete, yielding 2026-03-10T11:02:16.396 INFO:teuthology.run_tasks:Running task cephadm.shell... 
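Annotation: the block above is the harness settling the cluster after OSD deployment. For every OSD it compares the mon-reported `osd last-stat-seq` against the sequence it needs and retries until the stats catch up (the "need seq X got Y" lines), then polls `ceph pg dump --format=json` until the only PG is active+clean ("waiting for clean" ... "clean!"), and finally `ceph health --format=json` until HEALTH_OK. A minimal Python sketch of that polling pattern, assuming the same cephadm-wrapped CLI this job uses; the helper names are illustrative, not teuthology's actual code:

import json
import subprocess
import time

# fsid and image are copied from this run's log lines above
FSID = "2d4d1532-1c70-11f1-9ee5-8d2ac270c240"
IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"

def ceph(*args):
    # Run one ceph command inside a cephadm shell, as the harness does above.
    cmd = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
           "shell", "--fsid", FSID, "--", "ceph"] + list(args)
    return subprocess.run(cmd, check=True, capture_output=True,
                          text=True).stdout.strip()

def wait_for_stat_seq(osd_id, need, interval=1.0):
    # "need seq X got Y": retry until the OSD's last-stat-seq reaches the target.
    while int(ceph("osd", "last-stat-seq", "osd.%d" % osd_id)) < need:
        time.sleep(interval)

def wait_for_clean(interval=1.0):
    # "waiting for clean": every PG in the dump must report active+clean.
    while True:
        dump = json.loads(ceph("pg", "dump", "--format=json"))
        if all(p["state"] == "active+clean"
               for p in dump["pg_map"]["pg_stats"]):
            return
        time.sleep(interval)

def wait_until_healthy(interval=1.0):
    # "wait_until_healthy": poll until the health status string is HEALTH_OK.
    while json.loads(ceph("health", "--format=json"))["status"] != "HEALTH_OK":
        time.sleep(interval)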
2026-03-10T11:02:16.398 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm06.local 2026-03-10T11:02:16.398 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch status' 2026-03-10T11:02:16.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:15 vm07 ceph-mon[56438]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:16.562 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:16.789 INFO:teuthology.orchestra.run.vm06.stdout:Backend: cephadm 2026-03-10T11:02:16.789 INFO:teuthology.orchestra.run.vm06.stdout:Available: Yes 2026-03-10T11:02:16.789 INFO:teuthology.orchestra.run.vm06.stdout:Paused: No 2026-03-10T11:02:16.950 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch ps' 2026-03-10T11:02:17.109 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:17.181 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:17 vm06 ceph-mon[49534]: from='client.14536 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:17.181 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:17 vm06 ceph-mon[49534]: from='client.14540 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:17.181 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:17 vm06 ceph-mon[49534]: from='client.? 
192.168.123.106:0/988052156' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:alertmanager.vm06 vm06 *:9093,9094 running (49s) 13s ago 85s 22.6M - 0.25.0 c8568f914cd2 a807579b2855 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm06 vm06 *:9926 running (92s) 13s ago 92s 8514k - 19.2.3-678-ge911bdeb 654f31e6858e f04afec3b0fe 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm07 vm07 *:9926 running (63s) 13s ago 63s 6660k - 19.2.3-678-ge911bdeb 654f31e6858e 43cdb28ee734 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm06 vm06 running (91s) 13s ago 91s 7662k - 19.2.3-678-ge911bdeb 654f31e6858e cbe6bed3aad1 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm07 vm07 running (62s) 13s ago 62s 7650k - 19.2.3-678-ge911bdeb 654f31e6858e 03f155afca45 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:grafana.vm06 vm06 *:3000 running (48s) 13s ago 78s 83.0M - 10.4.0 c8b91775d855 7b6b7920b655 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm06.luxohm vm06 *:9283,8765,8443 running (2m) 13s ago 2m 545M - 19.2.3-678-ge911bdeb 654f31e6858e f8029ae232b2 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm07.rgmael vm07 *:8443,9283,8765 running (59s) 13s ago 59s 487M - 19.2.3-678-ge911bdeb 654f31e6858e 15f8f374cde9 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm06 vm06 running (2m) 13s ago 2m 49.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5c495012543a 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm07 vm07 running (58s) 13s ago 57s 43.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d9b06c633c41 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm06 vm06 *:9100 running (88s) 13s ago 88s 9181k - 1.7.0 72c9c2088986 89fe88f92fef 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm07 vm07 *:9100 running (60s) 13s ago 60s 9156k - 1.7.0 72c9c2088986 b301c2e37cef 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:osd.0 vm07 running (25s) 13s ago 25s 55.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e cadd21134cd1 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:osd.1 vm06 running (24s) 13s ago 24s 60.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 1b78317a4f01 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:osd.2 vm07 running (23s) 13s ago 23s 29.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e abfe9471d7a1 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:osd.3 vm06 running (21s) 13s ago 21s 55.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e37bdbcfe4d7 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:osd.4 vm07 running (20s) 13s ago 20s 54.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6161db3e3079 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:osd.5 vm06 running (18s) 13s ago 18s 34.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9b755fe7792b 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:osd.6 vm07 running (18s) 13s ago 18s 28.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 2588369018ea 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:osd.7 vm06 running (16s) 13s ago 16s 24.8M 4096M 19.2.3-678-ge911bdeb 
654f31e6858e c83d191c7d41 2026-03-10T11:02:17.341 INFO:teuthology.orchestra.run.vm06.stdout:prometheus.vm06 vm06 *:9095 running (46s) 13s ago 72s 33.2M - 2.51.0 1d3b7f56885b d458861c5abd 2026-03-10T11:02:17.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:17 vm07 ceph-mon[56438]: from='client.14536 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:17.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:17 vm07 ceph-mon[56438]: from='client.14540 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:17.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:17 vm07 ceph-mon[56438]: from='client.? 192.168.123.106:0/988052156' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T11:02:17.493 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch ls' 2026-03-10T11:02:17.658 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:alertmanager ?:9093,9094 1/1 13s ago 104s count:1 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter ?:9926 2/2 14s ago 105s * 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:crash 2/2 14s ago 106s * 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:grafana ?:3000 1/1 13s ago 105s count:1 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:mgr 2/2 14s ago 106s count:2 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:mon 2/2 14s ago 89s vm06:192.168.123.106=vm06;vm07:192.168.123.107=vm07;count:2 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter ?:9100 2/2 14s ago 104s * 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:osd.all-available-devices 8 14s ago 49s * 2026-03-10T11:02:17.878 INFO:teuthology.orchestra.run.vm06.stdout:prometheus ?:9095 1/1 13s ago 105s count:1 2026-03-10T11:02:18.048 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch host ls' 2026-03-10T11:02:18.218 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:18.259 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:18 vm06 ceph-mon[49534]: from='client.14548 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:18.259 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:18 vm06 ceph-mon[49534]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:18.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:18 vm07 ceph-mon[56438]: from='client.14548 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:18.416 
INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:18 vm07 ceph-mon[56438]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:18.442 INFO:teuthology.orchestra.run.vm06.stdout:HOST ADDR LABELS STATUS 2026-03-10T11:02:18.442 INFO:teuthology.orchestra.run.vm06.stdout:vm06 192.168.123.106 2026-03-10T11:02:18.442 INFO:teuthology.orchestra.run.vm06.stdout:vm07 192.168.123.107 2026-03-10T11:02:18.442 INFO:teuthology.orchestra.run.vm06.stdout:2 hosts in cluster 2026-03-10T11:02:18.608 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch device ls' 2026-03-10T11:02:18.771 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:18.996 INFO:teuthology.orchestra.run.vm06.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 13s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdb hdd DWNBRSTVMM06001 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdc hdd DWNBRSTVMM06002 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdd hdd DWNBRSTVMM06003 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vde hdd DWNBRSTVMM06004 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 14s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vdb hdd DWNBRSTVMM07001 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vdc hdd DWNBRSTVMM07002 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vdd hdd DWNBRSTVMM07003 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:18.997 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vde hdd DWNBRSTVMM07004 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:19.139 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:19 vm06 ceph-mon[49534]: from='client.14552 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:19.139 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:19 vm06 ceph-mon[49534]: from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:19.165 INFO:teuthology.run_tasks:Running task vip... 2026-03-10T11:02:19.168 INFO:tasks.vip:Allocating static IPs for each host... 
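Annotation: each orchestrator check in the cephadm.shell task above is a separate `cephadm ... shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring -- bash -c '<command>'` invocation, and a non-zero exit from any of them would fail the task. A sketch of driving that fixed smoke-check list (illustrative; the five commands are the ones visible in the log above):

import subprocess

FSID = "2d4d1532-1c70-11f1-9ee5-8d2ac270c240"
IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"

# the five orchestrator smoke checks logged above
CHECKS = [
    "ceph orch status",
    "ceph orch ps",
    "ceph orch ls",
    "ceph orch host ls",
    "ceph orch device ls",
]

for check in CHECKS:
    subprocess.run(
        ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE, "shell",
         "-c", "/etc/ceph/ceph.conf",
         "-k", "/etc/ceph/ceph.client.admin.keyring",
         "--fsid", FSID, "--", "bash", "-c", check],
        check=True)  # check=True raises on any non-zero exit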
2026-03-10T11:02:19.168 INFO:tasks.vip:peername 192.168.123.106 2026-03-10T11:02:19.168 INFO:tasks.vip:192.168.123.106 in 192.168.123.0/24, pos 105 2026-03-10T11:02:19.168 INFO:tasks.vip:vm06.local static 12.12.0.106, vnet 12.12.0.0/22 2026-03-10T11:02:19.168 INFO:tasks.vip:VIPs are [IPv4Address('12.12.1.106')] 2026-03-10T11:02:19.168 DEBUG:teuthology.orchestra.run.vm06:> sudo ip route ls 2026-03-10T11:02:19.192 INFO:teuthology.orchestra.run.vm06.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.106 metric 100 2026-03-10T11:02:19.192 INFO:teuthology.orchestra.run.vm06.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.106 metric 100 2026-03-10T11:02:19.193 INFO:tasks.vip:Configuring 12.12.0.106 on vm06.local iface eth0... 2026-03-10T11:02:19.193 DEBUG:teuthology.orchestra.run.vm06:> sudo ip addr add 12.12.0.106/22 dev eth0 2026-03-10T11:02:19.261 INFO:tasks.vip:peername 192.168.123.107 2026-03-10T11:02:19.262 INFO:tasks.vip:192.168.123.107 in 192.168.123.0/24, pos 106 2026-03-10T11:02:19.262 INFO:tasks.vip:vm07.local static 12.12.0.107, vnet 12.12.0.0/22 2026-03-10T11:02:19.262 DEBUG:teuthology.orchestra.run.vm07:> sudo ip route ls 2026-03-10T11:02:19.282 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:19 vm07 ceph-mon[56438]: from='client.14552 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:19.282 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:19 vm07 ceph-mon[56438]: from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:19.284 INFO:teuthology.orchestra.run.vm07.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.107 metric 100 2026-03-10T11:02:19.284 INFO:teuthology.orchestra.run.vm07.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.107 metric 100 2026-03-10T11:02:19.285 INFO:tasks.vip:Configuring 12.12.0.107 on vm07.local iface eth0... 2026-03-10T11:02:19.285 DEBUG:teuthology.orchestra.run.vm07:> sudo ip addr add 12.12.0.107/22 dev eth0 2026-03-10T11:02:19.353 INFO:teuthology.run_tasks:Running task cephadm.shell... 
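Annotation: the vip task's arithmetic above takes each host's position within its own subnet and reuses it inside the 12.12.0.0/22 test vnet: 192.168.123.106 sits at pos 105 in 192.168.123.0/24, so it gets static 12.12.0.106, and the VIP 12.12.1.106 lands one /24 slice further into the vnet. A plausible reconstruction with the stdlib ipaddress module (the offsets are inferred from the log output, not taken from the task's source):

import ipaddress

VNET = ipaddress.ip_network("12.12.0.0/22")  # vnet printed by the vip task

def allocate(host_ip, host_subnet):
    host_ip = ipaddress.ip_address(host_ip)
    subnet = ipaddress.ip_network(host_subnet)
    pos = list(subnet.hosts()).index(host_ip)   # "pos 105" for .106 in a /24
    static = list(VNET.hosts())[pos]            # same offset inside the vnet
    vip = static + 256                          # next /24 slice of the vnet
    return pos, static, vip

print(allocate("192.168.123.106", "192.168.123.0/24"))
# -> (105, IPv4Address('12.12.0.106'), IPv4Address('12.12.1.106'))

Both statics are then attached with `ip addr add <static>/22 dev eth0`, exactly as logged above.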
2026-03-10T11:02:19.355 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm06.local 2026-03-10T11:02:19.355 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch device ls --refresh' 2026-03-10T11:02:19.526 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 14s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdb hdd DWNBRSTVMM06001 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdc hdd DWNBRSTVMM06002 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdd hdd DWNBRSTVMM06003 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vde hdd DWNBRSTVMM06004 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 15s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vdb hdd DWNBRSTVMM07001 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vdc hdd DWNBRSTVMM07002 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vdd hdd DWNBRSTVMM07003 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:19.764 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vde hdd DWNBRSTVMM07004 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T11:02:19.930 INFO:teuthology.run_tasks:Running task vip.exec... 2026-03-10T11:02:19.932 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm06.local 2026-03-10T11:02:19.932 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-10T11:02:19.978 INFO:teuthology.orchestra.run.vm06.stderr:+ systemctl stop nfs-server 2026-03-10T11:02:19.983 INFO:tasks.vip:Running commands on role host.b host ubuntu@vm07.local 2026-03-10T11:02:19.983 DEBUG:teuthology.orchestra.run.vm07:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-10T11:02:20.027 INFO:teuthology.orchestra.run.vm07.stderr:+ systemctl stop nfs-server 2026-03-10T11:02:20.033 INFO:teuthology.run_tasks:Running task cephadm.shell... 
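Annotation: with the kernel nfs-server stopped on both hosts (presumably so it cannot conflict with the Ganesha daemons about to be deployed), the cephadm.shell task starting here creates the CephFS volume and an ingress-fronted NFS cluster bound to the VIP allocated earlier. A sketch of the two commands as they are issued, reusing the cephadm shell wrapper pattern from the log (the wrapper name is illustrative):

import subprocess

FSID = "2d4d1532-1c70-11f1-9ee5-8d2ac270c240"
IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"
VIP, PREFIXLEN = "12.12.1.106", 22  # allocated by the vip task above

def cephadm_shell(cmd):
    # one `cephadm shell -- bash -c '<cmd>'` per command, as in the log
    subprocess.run(
        ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE, "shell",
         "-c", "/etc/ceph/ceph.conf",
         "-k", "/etc/ceph/ceph.client.admin.keyring",
         "--fsid", FSID, "--", "bash", "-c", cmd],
        check=True)

cephadm_shell("ceph fs volume create foofs")
cephadm_shell("ceph nfs cluster create foo --ingress "
              "--virtual-ip {}/{} --port 2999".format(VIP, PREFIXLEN))

The MDS_ALL_DOWN health error that follows is transient: the freshly created foofs has no MDS daemons yet, and the log below already shows cephadm saving an mds.foofs spec with placement count:2 to deploy them.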
2026-03-10T11:02:20.037 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm06.local 2026-03-10T11:02:20.037 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph fs volume create foofs' 2026-03-10T11:02:20.235 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:20.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:20 vm06.local ceph-mon[49534]: from='client.14560 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:20 vm06.local ceph-mon[49534]: pgmap v50: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:20 vm06.local ceph-mon[49534]: from='client.14564 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:20 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:20.291 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:20 vm07.local ceph-mon[56438]: from='client.14560 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:20.291 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:20 vm07.local ceph-mon[56438]: pgmap v50: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:20.291 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:20 vm07.local ceph-mon[56438]: from='client.14564 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:20.291 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:20 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:21.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:21 vm06.local ceph-mon[49534]: from='client.14568 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:21.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:21 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]: dispatch 2026-03-10T11:02:21.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:21 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:21.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:21 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:21.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:21 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:21.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:21 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' 
entity='mgr.vm06.luxohm' 2026-03-10T11:02:21.286 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:21 vm07.local ceph-mon[56438]: from='client.14568 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:21.286 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:21 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]: dispatch 2026-03-10T11:02:21.286 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:21 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:21.286 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:21 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:21.287 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:21 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:21.287 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:21 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06[49510]: 2026-03-10T11:02:22.082+0000 7f28c0e1b640 -1 log_channel(cluster) log [ERR] : Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-mon[49534]: from='client.24337 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "foofs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-mon[49534]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]': finished 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-mon[49534]: osdmap e28: 8 total, 8 up, 8 in 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.foofs.data"}]: dispatch 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:22.143 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:22.311 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c 
/etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph nfs cluster create foo --ingress --virtual-ip 12.12.1.106/22 --port 2999' 2026-03-10T11:02:22.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:22 vm07.local ceph-mon[56438]: from='client.24337 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "foofs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:22.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:22 vm07.local ceph-mon[56438]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:22.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]': finished 2026-03-10T11:02:22.417 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:22 vm07.local ceph-mon[56438]: osdmap e28: 8 total, 8 up, 8 in 2026-03-10T11:02:22.417 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.foofs.data"}]: dispatch 2026-03-10T11:02:22.417 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:22.417 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:22.417 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:22.417 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:22.502 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.foofs.data"}]': finished 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: osdmap e29: 8 total, 8 up, 8 in 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]: dispatch 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": 
"cephfs.foofs.data"}]': finished 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: osdmap e30: 8 total, 8 up, 8 in 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: fsmap foofs:0 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: Saving service mds.foofs spec with placement count:2 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='client.14574 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "ingress": true, "virtual_ip": "12.12.1.106/22", "port": 2999, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm07.yrhofr", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm07.yrhofr", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", 
"mds", "allow"]}]': finished 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: Deploying daemon mds.foofs.vm07.yrhofr on vm07 2026-03-10T11:02:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:23 vm07.local ceph-mon[56438]: pgmap v55: 65 pgs: 6 creating+activating, 2 creating+peering, 43 unknown, 14 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.foofs.data"}]': finished 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: osdmap e29: 8 total, 8 up, 8 in 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]: dispatch 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]': finished 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: osdmap e30: 8 total, 8 up, 8 in 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: fsmap foofs:0 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: Saving service mds.foofs spec with placement count:2 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='client.14574 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "ingress": true, "virtual_ip": "12.12.1.106/22", "port": 2999, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": 
"osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm07.yrhofr", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm07.yrhofr", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: Deploying daemon mds.foofs.vm07.yrhofr on vm07 2026-03-10T11:02:23.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:23 vm06.local ceph-mon[49534]: pgmap v55: 65 pgs: 6 creating+activating, 2 creating+peering, 43 unknown, 14 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]': finished 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: osdmap e31: 8 total, 8 up, 8 in 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 
192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm06.yhlcqi", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm06.yhlcqi", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: Deploying daemon mds.foofs.vm06.yhlcqi on vm06 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:24.249 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T11:02:24.368 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake' 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]': finished 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: osdmap e31: 8 total, 8 up, 8 in 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 
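At this point the orchestrator has saved the three service specs this test exercises: mds.foofs (placement count:2), nfs.foo (placement count:1) and ingress.nfs.foo (placement count:2), as the surrounding mon records show. For reference, the same arrangement can be written declaratively; the sketch below is illustrative only and was not issued by this run (the field values are copied from the `ceph orch ls -f json` output captured further down, and the file path is hypothetical):

    cat > /tmp/nfs-ingress.yaml <<'EOF'
    # ganesha backend: one daemon, listening on 12999 behind the ingress
    service_type: nfs
    service_id: foo
    placement:
      count: 1
    spec:
      port: 12999
    ---
    # haproxy + keepalived pair holding the virtual IP; clients connect on 2999
    service_type: ingress
    service_id: nfs.foo
    placement:
      count: 2
    spec:
      backend_service: nfs.foo
      virtual_ip: 12.12.1.106/22
      frontend_port: 2999
      monitor_port: 9999
      first_virtual_router_id: 50
    EOF
    ceph orch apply -i /tmp/nfs-ingress.yaml
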
2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm06.yhlcqi", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm06.yhlcqi", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: Deploying daemon mds.foofs.vm06.yhlcqi on vm06 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:24.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T11:02:24.709 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:25.227 INFO:teuthology.orchestra.run.vm06.stdout:{ 2026-03-10T11:02:25.227 INFO:teuthology.orchestra.run.vm06.stdout: "bind": "/fake", 2026-03-10T11:02:25.227 INFO:teuthology.orchestra.run.vm06.stdout: "cluster": "foo", 2026-03-10T11:02:25.227 INFO:teuthology.orchestra.run.vm06.stdout: "fs": "foofs", 2026-03-10T11:02:25.227 INFO:teuthology.orchestra.run.vm06.stdout: "mode": "RW", 2026-03-10T11:02:25.227 INFO:teuthology.orchestra.run.vm06.stdout: "path": "/" 2026-03-10T11:02:25.227 INFO:teuthology.orchestra.run.vm06.stdout:} 2026-03-10T11:02:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished 2026-03-10T11:02:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: osdmap e32: 8 total, 8 up, 8 in 2026-03-10T11:02:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: mds.? 
[v2:192.168.123.107:6832/500950013,v1:192.168.123.107:6833/500950013] up:boot 2026-03-10T11:02:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: daemon mds.foofs.vm07.yrhofr assigned to filesystem foofs as rank 0 (now has 1 ranks) 2026-03-10T11:02:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline) 2026-03-10T11:02:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds) 2026-03-10T11:02:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: Cluster is now healthy 2026-03-10T11:02:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: fsmap foofs:0 1 up:standby 2026-03-10T11:02:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mds metadata", "who": "foofs.vm07.yrhofr"}]: dispatch 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:creating} 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: Saving service nfs.foo spec with placement count:1 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: Saving service ingress.nfs.foo spec with placement count:2 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: daemon mds.foofs.vm07.yrhofr is now active in filesystem foofs as rank 0 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: pgmap v58: 97 pgs: 6 creating+activating, 12 creating+peering, 31 unknown, 48 
active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 263 B/s wr, 1 op/s 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: from='client.14584 -' entity='client.admin' cmd=[{"prefix": "nfs export create cephfs", "fsname": "foofs", "cluster_id": "foo", "pseudo_path": "/fake", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:25.254 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:25 vm06.local ceph-mon[49534]: osdmap e33: 8 total, 8 up, 8 in 2026-03-10T11:02:25.340 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: osdmap e32: 8 total, 8 up, 8 in 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: mds.? [v2:192.168.123.107:6832/500950013,v1:192.168.123.107:6833/500950013] up:boot 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: daemon mds.foofs.vm07.yrhofr assigned to filesystem foofs as rank 0 (now has 1 ranks) 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline) 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds) 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: Cluster is now healthy 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: fsmap foofs:0 1 up:standby 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mds metadata", "who": "foofs.vm07.yrhofr"}]: dispatch 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:creating} 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: Saving service nfs.foo spec with placement count:1 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: Saving service ingress.nfs.foo spec with placement count:2 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: daemon mds.foofs.vm07.yrhofr is now active in filesystem foofs as rank 0 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local 
ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: pgmap v58: 97 pgs: 6 creating+activating, 12 creating+peering, 31 unknown, 48 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 263 B/s wr, 1 op/s 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: from='client.14584 -' entity='client.admin' cmd=[{"prefix": "nfs export create cephfs", "fsname": "foofs", "cluster_id": "foo", "pseudo_path": "/fake", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:25.341 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:25 vm07.local ceph-mon[56438]: osdmap e33: 8 total, 8 up, 8 in 2026-03-10T11:02:25.389 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-10T11:02:25.392 INFO:tasks.cephadm:Waiting for ceph service nfs.foo to start (timeout 300)... 
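The wait_for_service task now polls `ceph orch ls -f json` until the nfs.foo service reports as many running daemons as its placement size; the repeated "nfs.foo has 0/1" lines below are that loop reporting progress. A minimal shell equivalent of the readiness check, illustrative only (it assumes jq and an admin `ceph` CLI on the host; the task itself implements this loop in Python):

    service=nfs.foo
    deadline=$((SECONDS + 300))      # same 300 s timeout the task logs above
    while (( SECONDS < deadline )); do
        # running/size for this one service, e.g. "0/1" while still deploying
        counts=$(ceph orch ls -f json |
            jq -r --arg s "$service" \
               '.[] | select(.service_name == $s) | "\(.status.running)/\(.status.size)"')
        echo "$service has ${counts:-0/?}"
        run=${counts%%/*}; size=${counts##*/}
        if [[ -n $counts && $run == "$size" && $run != 0 ]]; then
            exit 0                   # every expected daemon is running
        fi
        sleep 1
    done
    echo "timed out waiting for $service" >&2
    exit 1
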
2026-03-10T11:02:25.392 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json 2026-03-10T11:02:25.618 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:25.903 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:25.903 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T11:00:33.637535Z", "last_refresh": "2026-03-10T11:02:21.219267Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:01:13.517336Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T11:00:31.962075Z", "last_refresh": "2026-03-10T11:02:21.219176Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:14.457612Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T11:00:31.488555Z", "last_refresh": "2026-03-10T11:02:21.219207Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T11:00:32.797481Z", "last_refresh": "2026-03-10T11:02:21.219294Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:02:24.178485Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-03-10T11:02:24.168837Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-03-10T11:02:24.746697Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-10T11:02:22.133618Z", "last_refresh": "2026-03-10T11:02:25.593964Z", "running": 1, "size": 2}}, {"events": ["2026-03-10T11:01:18.033909Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T11:00:31.101160Z", "last_refresh": "2026-03-10T11:02:21.219144Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:19.419545Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T11:00:48.711712Z", "last_refresh": "2026-03-10T11:02:21.219073Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:02:24.167873Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-10T11:02:24.150425Z", "ports": [12999], 
"running": 0, "size": 1}}, {"events": ["2026-03-10T11:01:17.159457Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T11:00:33.202154Z", "last_refresh": "2026-03-10T11:02:21.219238Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:28.103899Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T11:01:28.094481Z", "last_refresh": "2026-03-10T11:02:21.219347Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T11:01:19.422918Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T11:00:32.386474Z", "last_refresh": "2026-03-10T11:02:21.219321Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T11:02:26.068 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: mds.? [v2:192.168.123.107:6832/500950013,v1:192.168.123.107:6833/500950013] up:active 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: mds.? [v2:192.168.123.106:6834/952736302,v1:192.168.123.106:6835/952736302] up:boot 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:active} 1 up:standby 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mds metadata", "who": "foofs.vm06.yhlcqi"}]: dispatch 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:active} 1 up:standby 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]': finished 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 
vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: mgrmap e20: vm06.luxohm(active, since 76s), standbys: vm07.rgmael 2026-03-10T11:02:26.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:26 vm06.local ceph-mon[49534]: from='client.14596 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: mds.? [v2:192.168.123.107:6832/500950013,v1:192.168.123.107:6833/500950013] up:active 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: mds.? [v2:192.168.123.106:6834/952736302,v1:192.168.123.106:6835/952736302] up:boot 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:active} 1 up:standby 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "mds metadata", "who": "foofs.vm06.yhlcqi"}]: dispatch 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:active} 1 up:standby 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]': finished 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 vm07.local ceph-mon[56438]: mgrmap e20: vm06.luxohm(active, since 76s), standbys: vm07.rgmael 2026-03-10T11:02:26.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:26 
vm07.local ceph-mon[56438]: from='client.14596 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:27.068 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: Creating key for client.nfs.foo.0.0.vm06.gikzfj 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm06.gikzfj", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-10T11:02:27.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm06.gikzfj", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", 
"allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: Rados config object exists: conf-nfs.foo 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: Creating key for client.nfs.foo.0.0.vm06.gikzfj-rgw 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm06.gikzfj-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm06.gikzfj-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: Bind address in nfs.foo.0.0.vm06.gikzfj's ganesha conf is defaulting to empty 2026-03-10T11:02:27.363 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:27.364 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: Deploying daemon nfs.foo.0.0.vm06.gikzfj on vm06 2026-03-10T11:02:27.364 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:27 vm06.local ceph-mon[49534]: pgmap v60: 97 pgs: 6 creating+activating, 10 creating+peering, 10 unknown, 71 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s wr, 2 op/s 2026-03-10T11:02:27.430 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 
192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: Creating key for client.nfs.foo.0.0.vm06.gikzfj 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm06.gikzfj", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm06.gikzfj", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: Rados config object exists: conf-nfs.foo 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: Creating key for client.nfs.foo.0.0.vm06.gikzfj-rgw 2026-03-10T11:02:27.666 
INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm06.gikzfj-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm06.gikzfj-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: Bind address in nfs.foo.0.0.vm06.gikzfj's ganesha conf is defaulting to empty 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: Deploying daemon nfs.foo.0.0.vm06.gikzfj on vm06 2026-03-10T11:02:27.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:27 vm07.local ceph-mon[56438]: pgmap v60: 97 pgs: 6 creating+activating, 10 creating+peering, 10 unknown, 71 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s wr, 2 op/s 2026-03-10T11:02:27.697 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:27.697 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T11:00:33.637535Z", "last_refresh": "2026-03-10T11:02:26.347864Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:01:13.517336Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T11:00:31.962075Z", "last_refresh": "2026-03-10T11:02:25.593695Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:14.457612Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T11:00:31.488555Z", "last_refresh": "2026-03-10T11:02:25.593741Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T11:00:32.797481Z", "last_refresh": "2026-03-10T11:02:26.347890Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:02:24.178485Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-03-10T11:02:24.168837Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-03-10T11:02:24.746697Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": 
"mds", "status": {"created": "2026-03-10T11:02:22.133618Z", "last_refresh": "2026-03-10T11:02:25.593964Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:18.033909Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T11:00:31.101160Z", "last_refresh": "2026-03-10T11:02:25.593800Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:19.419545Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T11:00:48.711712Z", "last_refresh": "2026-03-10T11:02:25.593828Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:02:27.476400Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-10T11:02:24.150425Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-10T11:01:17.159457Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T11:00:33.202154Z", "last_refresh": "2026-03-10T11:02:25.593771Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:28.103899Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T11:01:28.094481Z", "last_refresh": "2026-03-10T11:02:25.593855Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T11:01:19.422918Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T11:00:32.386474Z", "last_refresh": "2026-03-10T11:02:26.347915Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T11:02:27.869 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-10T11:02:28.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:28 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:28.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:28 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:28.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:28 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:28.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:28 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:28.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:28 vm06.local ceph-mon[49534]: mgrmap e21: vm06.luxohm(active, since 78s), standbys: vm07.rgmael 2026-03-10T11:02:28.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:28 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:28.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:28 vm06.local ceph-mon[49534]: Deploying daemon haproxy.nfs.foo.vm06.rbqjdy on vm06 2026-03-10T11:02:28.753 
2026-03-10T11:02:28.869 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json
2026-03-10T11:02:28.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:28 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:28.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:28 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:28.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:28 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:28.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:28 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:28.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:28 vm07.local ceph-mon[56438]: mgrmap e21: vm06.luxohm(active, since 78s), standbys: vm07.rgmael
2026-03-10T11:02:28.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:28 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:28.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:28 vm07.local ceph-mon[56438]: Deploying daemon haproxy.nfs.foo.vm06.rbqjdy on vm06
2026-03-10T11:02:28.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:28 vm07.local ceph-mon[56438]: from='client.14618 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:29.043 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:29.281 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:29.437 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-10T11:02:29.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:29 vm06.local ceph-mon[49534]: mds.? [v2:192.168.123.107:6832/500950013,v1:192.168.123.107:6833/500950013] up:active
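The repeated `ceph orch ls -f json` calls followed by `tasks.cephadm:nfs.foo has 0/1` are the `cephadm.wait_for_service` task polling until the service's running count matches its target size. A minimal sketch of that style of check (the helper names and the exact command prefix are illustrative, not teuthology's actual code):

```python
import json
import subprocess
import time

# Illustrative poll loop; assumes a working cephadm on the host.
ORCH_LS = ["sudo", "cephadm", "shell", "--", "ceph", "orch", "ls", "-f", "json"]

def service_counts(name: str) -> tuple[int, int]:
    # Find the named service in the orchestrator listing and return
    # (running, size), defaulting to (0, 0) if it is not listed yet.
    for svc in json.loads(subprocess.check_output(ORCH_LS)):
        if svc["service_name"] == name:
            return svc["status"].get("running", 0), svc["status"].get("size", 0)
    return 0, 0

def wait_for_service(name: str, interval: float = 1.5, timeout: float = 300.0) -> None:
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        running, size = service_counts(name)
        print(f"{name} has {running}/{size}")  # cf. "nfs.foo has 0/1" above
        if size > 0 and running == size:
            return
        time.sleep(interval)
    raise TimeoutError(f"service {name} did not become fully running")
```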
2026-03-10T11:02:29.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:29 vm06.local ceph-mon[49534]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:active} 1 up:standby
2026-03-10T11:02:29.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:29 vm06.local ceph-mon[49534]: pgmap v61: 97 pgs: 8 creating+peering, 89 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 2.8 KiB/s wr, 5 op/s
2026-03-10T11:02:29.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:29 vm07.local ceph-mon[56438]: mds.? [v2:192.168.123.107:6832/500950013,v1:192.168.123.107:6833/500950013] up:active
2026-03-10T11:02:29.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:29 vm07.local ceph-mon[56438]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:active} 1 up:standby
2026-03-10T11:02:29.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:29 vm07.local ceph-mon[56438]: pgmap v61: 97 pgs: 8 creating+peering, 89 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 2.8 KiB/s wr, 5 op/s
2026-03-10T11:02:30.438 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json
2026-03-10T11:02:30.751 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:30.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:30 vm06.local ceph-mon[49534]: from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:30.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:30 vm06.local ceph-mon[49534]: mds.? [v2:192.168.123.106:6834/952736302,v1:192.168.123.106:6835/952736302] up:standby
2026-03-10T11:02:30.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:30 vm06.local ceph-mon[49534]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:active} 1 up:standby
2026-03-10T11:02:30.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:30 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:30.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:30 vm07.local ceph-mon[56438]: from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:30.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:30 vm07.local ceph-mon[56438]: mds.? [v2:192.168.123.106:6834/952736302,v1:192.168.123.106:6835/952736302] up:standby
2026-03-10T11:02:30.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:30 vm07.local ceph-mon[56438]: fsmap foofs:1 {0=foofs.vm07.yrhofr=up:active} 1 up:standby
2026-03-10T11:02:30.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:30 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:31.026 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:31.201 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-10T11:02:31.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:31 vm06.local ceph-mon[49534]: pgmap v62: 97 pgs: 97 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 3.3 KiB/s wr, 8 op/s
2026-03-10T11:02:31.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:31 vm06.local ceph-mon[49534]: from='client.24381 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:31.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:31 vm07.local ceph-mon[56438]: pgmap v62: 97 pgs: 97 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 3.3 KiB/s wr, 8 op/s
2026-03-10T11:02:31.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:31 vm07.local ceph-mon[56438]: from='client.24381 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:32.202 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json
2026-03-10T11:02:32.373 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:32.625 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:32.789 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-10T11:02:33.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:32 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:33.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:32 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:33.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:32 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:33.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:32 vm06.local ceph-mon[49534]: Deploying daemon haproxy.nfs.foo.vm07.clltbz on vm07
2026-03-10T11:02:33.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:32 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:33.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:32 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:33.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:32 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:33.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:32 vm07.local ceph-mon[56438]: Deploying daemon haproxy.nfs.foo.vm07.clltbz on vm07
2026-03-10T11:02:33.789 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json
2026-03-10T11:02:33.963 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:34.079 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:33 vm06.local ceph-mon[49534]: from='client.14628 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:34.079 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:33 vm06.local ceph-mon[49534]: pgmap v63: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 3.3 KiB/s wr, 8 op/s
2026-03-10T11:02:34.199 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:34.348 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-10T11:02:34.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:33 vm07.local ceph-mon[56438]: from='client.14628 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:34.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:33 vm07.local ceph-mon[56438]: pgmap v63: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 3.3 KiB/s wr, 8 op/s
2026-03-10T11:02:35.348 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json
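With `haproxy.nfs.foo.vm07.clltbz` deployed, both haproxy daemons of the ingress pair now exist. The `ingress.nfs.foo` entry reports `size: 4` for a placement count of 2 because cephadm places one haproxy plus one keepalived daemon per placement slot. As a sketch (the helper is hypothetical; `keepalive_only` is a real ingress spec option, but its handling here is an assumption):

```python
def expected_ingress_daemons(placement_count: int, keepalive_only: bool = False) -> int:
    # One keepalived per slot, plus one haproxy per slot unless the
    # spec asks for keepalived only.
    per_slot = 1 if keepalive_only else 2
    return placement_count * per_slot

assert expected_ingress_daemons(2) == 4  # matches "size": 4 in the dumps above
```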
2026-03-10T11:02:35.523 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:35.641 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:35 vm06.local ceph-mon[49534]: from='client.14632 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:35.641 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:35 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:35.641 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:35 vm06.local ceph-mon[49534]: pgmap v64: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.8 KiB/s wr, 7 op/s
2026-03-10T11:02:35.688 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:35 vm07.local ceph-mon[56438]: from='client.14632 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:35.688 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:35 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:35.688 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:35 vm07.local ceph-mon[56438]: pgmap v64: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.8 KiB/s wr, 7 op/s
2026-03-10T11:02:35.776 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:35.928 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-10T11:02:36.928 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json
2026-03-10T11:02:37.095 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:37.177 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:36 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:37.177 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:36 vm06.local ceph-mon[49534]: from='client.14636 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:37.177 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:36 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:37.177 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:36 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
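Each DEBUG line above runs the same cephadm shell invocation. Its shape, reconstructed as an illustrative helper (the image, fsid, and paths are this run's values; the helper itself is not teuthology code):

```python
def cephadm_shell(image: str, fsid: str, *cmd: str) -> list[str]:
    # Build the argv seen in the DEBUG lines: a containerized ceph CLI
    # call using the cluster's admin config and keyring.
    return [
        "sudo", "/home/ubuntu/cephtest/cephadm",
        "--image", image,
        "shell",
        "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", fsid,
        "--",
        *cmd,
    ]

argv = cephadm_shell(
    "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
    "2d4d1532-1c70-11f1-9ee5-8d2ac270c240",
    "ceph", "orch", "ls", "-f", "json",
)
```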
2026-03-10T11:02:37.177 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:36 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:37.177 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:36 vm06.local ceph-mon[49534]: 12.12.1.106 is in 12.12.0.0/22 on vm06 interface eth0
2026-03-10T11:02:37.177 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:36 vm06.local ceph-mon[49534]: 12.12.1.106 is in 12.12.0.0/22 on vm07 interface eth0
2026-03-10T11:02:37.177 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:36 vm06.local ceph-mon[49534]: Deploying daemon keepalived.nfs.foo.vm06.ewpmce on vm06
2026-03-10T11:02:37.341 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:36 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:36 vm07.local ceph-mon[56438]: from='client.14636 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:36 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:36 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:36 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:36 vm07.local ceph-mon[56438]: 12.12.1.106 is in 12.12.0.0/22 on vm06 interface eth0
2026-03-10T11:02:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:36 vm07.local ceph-mon[56438]: 12.12.1.106 is in 12.12.0.0/22 on vm07 interface eth0
2026-03-10T11:02:37.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:36 vm07.local ceph-mon[56438]: Deploying daemon keepalived.nfs.foo.vm06.ewpmce on vm06
2026-03-10T11:02:37.491 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-10T11:02:38.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:37 vm06.local ceph-mon[49534]: pgmap v65: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 2.4 KiB/s wr, 6 op/s
2026-03-10T11:02:38.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:37 vm07.local ceph-mon[56438]: pgmap v65: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 2.4 KiB/s wr, 6 op/s
2026-03-10T11:02:38.492 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json
2026-03-10T11:02:38.741 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:39.037 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-10T11:02:39.168 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:38 vm06.local ceph-mon[49534]: from='client.14640 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:39.200 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-10T11:02:39.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:38 vm07.local ceph-mon[56438]: from='client.14640 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:40.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:39 vm06.local ceph-mon[49534]: pgmap v66: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1.6 KiB/s wr, 5 op/s
2026-03-10T11:02:40.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:39 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:40.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:39 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:02:40.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:39 vm06.local ceph-mon[49534]: from='client.14644 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:40.200 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json
2026-03-10T11:02:40.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:39 vm07.local ceph-mon[56438]: pgmap v66: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1.6 KiB/s wr, 5 op/s
2026-03-10T11:02:40.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:39 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:02:40.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:39 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:02:40.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:39 vm07.local ceph-mon[56438]: from='client.14644 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T11:02:40.527 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:02:40.795 INFO:teuthology.orchestra.run.vm06.stdout:
["2026-03-10T11:01:17.159457Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T11:00:33.202154Z", "last_refresh": "2026-03-10T11:02:25.593771Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:28.103899Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T11:01:28.094481Z", "last_refresh": "2026-03-10T11:02:25.593855Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T11:01:19.422918Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T11:00:32.386474Z", "last_refresh": "2026-03-10T11:02:26.347915Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T11:02:40.947 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-10T11:02:41.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:41 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:41.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:41 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:41.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:41 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:41.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:41 vm07.local ceph-mon[56438]: 12.12.1.106 is in 12.12.0.0/22 on vm07 interface eth0 2026-03-10T11:02:41.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:41 vm07.local ceph-mon[56438]: 12.12.1.106 is in 12.12.0.0/22 on vm06 interface eth0 2026-03-10T11:02:41.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:41 vm07.local ceph-mon[56438]: Deploying daemon keepalived.nfs.foo.vm07.hlkpmk on vm07 2026-03-10T11:02:41.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:41 vm07.local ceph-mon[56438]: from='client.14648 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:41.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:41 vm07.local ceph-mon[56438]: pgmap v67: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1023 B/s wr, 3 op/s 2026-03-10T11:02:41.948 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json 2026-03-10T11:02:41.971 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:41 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:41.971 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:41 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:41.971 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:41 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 
2026-03-10T11:02:41.971 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:41 vm06.local ceph-mon[49534]: 12.12.1.106 is in 12.12.0.0/22 on vm07 interface eth0 2026-03-10T11:02:41.971 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:41 vm06.local ceph-mon[49534]: 12.12.1.106 is in 12.12.0.0/22 on vm06 interface eth0 2026-03-10T11:02:41.971 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:41 vm06.local ceph-mon[49534]: Deploying daemon keepalived.nfs.foo.vm07.hlkpmk on vm07 2026-03-10T11:02:41.971 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:41 vm06.local ceph-mon[49534]: from='client.14648 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:41.971 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:41 vm06.local ceph-mon[49534]: pgmap v67: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1023 B/s wr, 3 op/s 2026-03-10T11:02:42.117 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:42.398 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:42.399 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T11:00:33.637535Z", "last_refresh": "2026-03-10T11:02:26.347864Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:01:13.517336Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T11:00:31.962075Z", "last_refresh": "2026-03-10T11:02:25.593695Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:14.457612Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T11:00:31.488555Z", "last_refresh": "2026-03-10T11:02:25.593741Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T11:00:32.797481Z", "last_refresh": "2026-03-10T11:02:26.347890Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:02:40.521276Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-03-10T11:02:24.168837Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-03-10T11:02:24.746697Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-10T11:02:22.133618Z", "last_refresh": "2026-03-10T11:02:25.593964Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:18.033909Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T11:00:31.101160Z", "last_refresh": "2026-03-10T11:02:25.593800Z", "running": 
2, "size": 2}}, {"events": ["2026-03-10T11:01:19.419545Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T11:00:48.711712Z", "last_refresh": "2026-03-10T11:02:25.593828Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:02:27.476400Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-10T11:02:24.150425Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-10T11:01:17.159457Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T11:00:33.202154Z", "last_refresh": "2026-03-10T11:02:25.593771Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:28.103899Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T11:01:28.094481Z", "last_refresh": "2026-03-10T11:02:25.593855Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T11:01:19.422918Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T11:00:32.386474Z", "last_refresh": "2026-03-10T11:02:26.347915Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T11:02:42.570 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-10T11:02:43.570 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json 2026-03-10T11:02:43.749 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:43.989 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:43.989 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T11:00:33.637535Z", "last_refresh": "2026-03-10T11:02:26.347864Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:01:13.517336Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T11:00:31.962075Z", "last_refresh": "2026-03-10T11:02:25.593695Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:14.457612Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T11:00:31.488555Z", "last_refresh": "2026-03-10T11:02:25.593741Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, 
"status": {"created": "2026-03-10T11:00:32.797481Z", "last_refresh": "2026-03-10T11:02:26.347890Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:02:40.521276Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-03-10T11:02:24.168837Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-03-10T11:02:24.746697Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-10T11:02:22.133618Z", "last_refresh": "2026-03-10T11:02:25.593964Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:18.033909Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T11:00:31.101160Z", "last_refresh": "2026-03-10T11:02:25.593800Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:19.419545Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T11:00:48.711712Z", "last_refresh": "2026-03-10T11:02:25.593828Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:02:27.476400Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-10T11:02:24.150425Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-10T11:01:17.159457Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T11:00:33.202154Z", "last_refresh": "2026-03-10T11:02:25.593771Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:28.103899Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T11:01:28.094481Z", "last_refresh": "2026-03-10T11:02:25.593855Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T11:01:19.422918Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T11:00:32.386474Z", "last_refresh": "2026-03-10T11:02:26.347915Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T11:02:44.138 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:43 vm06.local ceph-mon[49534]: from='client.14652 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:44.138 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:43 vm06.local ceph-mon[49534]: pgmap v68: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T11:02:44.165 
INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-10T11:02:44.246 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:43 vm07.local ceph-mon[56438]: from='client.14652 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:44.247 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:43 vm07.local ceph-mon[56438]: pgmap v68: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-10T11:02:45.111 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:45 vm07.local ceph-mon[56438]: from='client.14656 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:45.111 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:45 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:45.111 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:45 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:45.111 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:45 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:45.111 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:45 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:45.111 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:45 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:45.166 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json 2026-03-10T11:02:45.367 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:45 vm06.local ceph-mon[49534]: from='client.14656 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:45.367 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:45 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:45.367 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:45 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:45.367 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:45 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:45.367 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:45 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:45.368 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:45 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:45.402 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:45.707 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:45.707 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": 
"alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T11:00:33.637535Z", "last_refresh": "2026-03-10T11:02:26.347864Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:01:13.517336Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T11:00:31.962075Z", "last_refresh": "2026-03-10T11:02:25.593695Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:14.457612Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T11:00:31.488555Z", "last_refresh": "2026-03-10T11:02:25.593741Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T11:00:32.797481Z", "last_refresh": "2026-03-10T11:02:26.347890Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:02:44.941576Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-03-10T11:02:24.168837Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-03-10T11:02:24.746697Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-10T11:02:22.133618Z", "last_refresh": "2026-03-10T11:02:25.593964Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:18.033909Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T11:00:31.101160Z", "last_refresh": "2026-03-10T11:02:25.593800Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:19.419545Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T11:00:48.711712Z", "last_refresh": "2026-03-10T11:02:25.593828Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:02:27.476400Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-10T11:02:24.150425Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-10T11:01:17.159457Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T11:00:33.202154Z", "last_refresh": "2026-03-10T11:02:25.593771Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:28.103899Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": 
{"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T11:01:28.094481Z", "last_refresh": "2026-03-10T11:02:25.593855Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T11:01:19.422918Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T11:00:32.386474Z", "last_refresh": "2026-03-10T11:02:26.347915Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T11:02:45.868 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-10T11:02:46.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:46 vm06.local ceph-mon[49534]: pgmap v69: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-10T11:02:46.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:46 vm06.local ceph-mon[49534]: from='client.14660 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:46 vm07.local ceph-mon[56438]: pgmap v69: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-10T11:02:46.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:46 vm07.local ceph-mon[56438]: from='client.14660 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:46.869 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json 2026-03-10T11:02:47.099 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:47.365 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:47.365 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T11:00:33.637535Z", "last_refresh": "2026-03-10T11:02:46.960365Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:01:13.517336Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T11:00:31.962075Z", "last_refresh": "2026-03-10T11:02:46.193456Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:14.457612Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T11:00:31.488555Z", "last_refresh": "2026-03-10T11:02:46.193509Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T11:00:32.797481Z", "last_refresh": "2026-03-10T11:02:46.960397Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:02:44.941576Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": 
"nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-03-10T11:02:24.168837Z", "last_refresh": "2026-03-10T11:02:46.193795Z", "ports": [2999, 9999], "running": 4, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-03-10T11:02:24.746697Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-10T11:02:22.133618Z", "last_refresh": "2026-03-10T11:02:46.193762Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:18.033909Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T11:00:31.101160Z", "last_refresh": "2026-03-10T11:02:46.193579Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:19.419545Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T11:00:48.711712Z", "last_refresh": "2026-03-10T11:02:46.193611Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:02:46.987115Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-10T11:02:24.150425Z", "last_refresh": "2026-03-10T11:02:46.960612Z", "ports": [12999], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:01:17.159457Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T11:00:33.202154Z", "last_refresh": "2026-03-10T11:02:46.193545Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:28.103899Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T11:01:28.094481Z", "last_refresh": "2026-03-10T11:02:46.193642Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T11:01:19.422918Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T11:00:32.386474Z", "last_refresh": "2026-03-10T11:02:46.960428Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T11:02:47.419 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.419 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.419 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: pgmap v70: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-10T11:02:47.419 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.419 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.419 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:47.420 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:47.420 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.420 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:47.420 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.420 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:47 vm06.local ceph-mon[49534]: Reconfiguring prometheus.vm06 (dependencies changed)... 2026-03-10T11:02:47.538 INFO:tasks.cephadm:nfs.foo has 1/1 2026-03-10T11:02:47.538 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-10T11:02:47.540 INFO:tasks.cephadm:Waiting for ceph service ingress.nfs.foo to start (timeout 300)... 2026-03-10T11:02:47.540 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph orch ls -f json 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: pgmap v70: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: from='mgr.14217 
192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:47.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:47 vm07.local ceph-mon[56438]: Reconfiguring prometheus.vm06 (dependencies changed)... 2026-03-10T11:02:47.902 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:48.175 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-10T11:02:48.175 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T11:00:33.637535Z", "last_refresh": "2026-03-10T11:02:46.960365Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:01:13.517336Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T11:00:31.962075Z", "last_refresh": "2026-03-10T11:02:46.193456Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:14.457612Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T11:00:31.488555Z", "last_refresh": "2026-03-10T11:02:46.193509Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T11:00:32.797481Z", "last_refresh": "2026-03-10T11:02:46.960397Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:02:44.941576Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-03-10T11:02:24.168837Z", "last_refresh": "2026-03-10T11:02:46.193795Z", "ports": [2999, 9999], "running": 4, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-03-10T11:02:24.746697Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-10T11:02:22.133618Z", "last_refresh": "2026-03-10T11:02:46.193762Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:18.033909Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T11:00:31.101160Z", "last_refresh": "2026-03-10T11:02:46.193579Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:19.419545Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", 
"status": {"created": "2026-03-10T11:00:48.711712Z", "last_refresh": "2026-03-10T11:02:46.193611Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T11:02:46.987115Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-10T11:02:24.150425Z", "last_refresh": "2026-03-10T11:02:46.960612Z", "ports": [12999], "running": 1, "size": 1}}, {"events": ["2026-03-10T11:01:17.159457Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T11:00:33.202154Z", "last_refresh": "2026-03-10T11:02:46.193545Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T11:01:28.103899Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T11:01:28.094481Z", "last_refresh": "2026-03-10T11:02:46.193642Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T11:01:19.422918Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T11:00:32.386474Z", "last_refresh": "2026-03-10T11:02:46.960428Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-10T11:02:48.357 INFO:tasks.cephadm:ingress.nfs.foo has 4/4 2026-03-10T11:02:48.357 INFO:teuthology.run_tasks:Running task vip.exec... 2026-03-10T11:02:48.360 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm06.local 2026-03-10T11:02:48.360 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'mkdir /mnt/foo' 2026-03-10T11:02:48.415 INFO:teuthology.orchestra.run.vm06.stderr:+ mkdir /mnt/foo 2026-03-10T11:02:48.417 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'sleep 5' 2026-03-10T11:02:48.439 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:48 vm06.local ceph-mon[49534]: Reconfiguring daemon prometheus.vm06 on vm06 2026-03-10T11:02:48.439 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:48 vm06.local ceph-mon[49534]: from='client.14664 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:48.455 INFO:teuthology.orchestra.run.vm06.stderr:+ sleep 5 2026-03-10T11:02:48.502 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:48 vm07.local ceph-mon[56438]: Reconfiguring daemon prometheus.vm06 on vm06 2026-03-10T11:02:48.502 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:48 vm07.local ceph-mon[56438]: from='client.14664 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:49.661 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:49 vm06.local ceph-mon[49534]: from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:49.661 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:49 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:49.661 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:49 
vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:49.661 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:49 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T11:02:49.661 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:49 vm06.local ceph-mon[49534]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T11:02:49.661 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:49 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:49.661 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:49 vm06.local ceph-mon[49534]: pgmap v71: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-10T11:02:49.665 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:49 vm07.local ceph-mon[56438]: from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T11:02:49.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:49 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:49.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:49 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:49.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:49 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T11:02:49.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:49 vm07.local ceph-mon[56438]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T11:02:49.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:49 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:49.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:49 vm07.local ceph-mon[56438]: pgmap v71: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-10T11:02:51.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:50 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:51.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:50 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:51.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:50 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:51.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:50 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:51.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:50 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:51.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:50 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:52.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:51 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:52.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:51 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:52.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:51 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:52.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:51 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:52.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:51 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:52.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:51 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:52.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:51 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:52.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:51 vm06.local ceph-mon[49534]: pgmap v72: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-10T11:02:52.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:51 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:52.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 
11:02:51 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:52.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:51 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:52.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:51 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:52.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:51 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:52.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:51 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:52.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:51 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:52.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:51 vm07.local ceph-mon[56438]: pgmap v72: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-10T11:02:53.458 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'mount -t nfs 12.12.1.106:/fake /mnt/foo -o port=2999' 2026-03-10T11:02:53.526 INFO:teuthology.orchestra.run.vm06.stderr:+ mount -t nfs 12.12.1.106:/fake /mnt/foo -o port=2999 2026-03-10T11:02:53.726 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'echo test > /mnt/foo/testfile' 2026-03-10T11:02:53.797 INFO:teuthology.orchestra.run.vm06.stderr:+ echo test 2026-03-10T11:02:53.816 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c sync 2026-03-10T11:02:53.882 INFO:teuthology.orchestra.run.vm06.stderr:+ sync 2026-03-10T11:02:54.274 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T11:02:54.278 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm06.local 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -v /mnt/foo:/mnt/foo -- bash -c 'echo "Check with each haproxy down in turn..." 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '"'"'{print $1}'"'"'`; do 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> ceph orch daemon stop $haproxy 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> cat /mnt/foo/testfile 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> echo $haproxy > /mnt/foo/testfile 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> sync 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> ceph orch daemon start $haproxy 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> while ! 
ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> done 2026-03-10T11:02:54.278 DEBUG:teuthology.orchestra.run.vm06:> ' 2026-03-10T11:02:54.495 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config 2026-03-10T11:02:54.539 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:54 vm06.local ceph-mon[49534]: pgmap v73: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-10T11:02:54.539 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:54 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T11:02:54.582 INFO:teuthology.orchestra.run.vm06.stdout:Check with each haproxy down in turn... 2026-03-10T11:02:54.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:54 vm07.local ceph-mon[56438]: pgmap v73: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-10T11:02:54.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:54 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T11:02:54.924 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to stop haproxy.nfs.foo.vm06.rbqjdy on host 'vm06' 2026-03-10T11:02:56.080 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:55 vm06.local ceph-mon[49534]: from='client.14672 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:56.080 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:55 vm06.local ceph-mon[49534]: from='client.14676 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.nfs.foo.vm06.rbqjdy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:56.080 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:55 vm06.local ceph-mon[49534]: Schedule stop daemon haproxy.nfs.foo.vm06.rbqjdy 2026-03-10T11:02:56.080 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:55 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:56.080 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:55 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:56.080 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:55 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:56.080 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:55 vm06.local ceph-mon[49534]: pgmap v74: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-10T11:02:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:55 vm07.local ceph-mon[56438]: from='client.14672 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:55 vm07.local ceph-mon[56438]: from='client.14676 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.nfs.foo.vm06.rbqjdy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 
11:02:55 vm07.local ceph-mon[56438]: Schedule stop daemon haproxy.nfs.foo.vm06.rbqjdy 2026-03-10T11:02:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:55 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:55 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:55 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:56.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:55 vm07.local ceph-mon[56438]: pgmap v74: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='client.14680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:57.276 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:57 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='client.14680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 
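[editor's note] Between 11:02:48 and 11:02:53 the vip.exec task prepared the client side of this check: it created the mountpoint, mounted the CephFS export through the ingress virtual IP on the custom haproxy frontend port, and seeded the marker file. The sequence, with the values of this job (VIP 12.12.1.106, port 2999, pseudo-path /fake), is roughly:

    mkdir /mnt/foo
    sleep 5   # the task pauses before mounting, presumably to let the VIP settle
    mount -t nfs 12.12.1.106:/fake /mnt/foo -o port=2999
    echo test > /mnt/foo/testfile
    sync
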
2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:02:57.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:57 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:58.302 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:58 vm06.local ceph-mon[49534]: from='client.14684 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:58.302 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:58 vm06.local ceph-mon[49534]: pgmap v75: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T11:02:58.302 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:58 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:58.302 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:58 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:58.302 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:58 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:02:58.375 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:58 vm07.local ceph-mon[56438]: from='client.14684 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:58.375 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:58 vm07.local ceph-mon[56438]: pgmap v75: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 85 B/s wr, 0 op/s 2026-03-10T11:02:58.375 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:58 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:58.375 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:58 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:58.375 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:58 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-10T11:02:59.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:59 vm06.local ceph-mon[49534]: from='client.14688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:59.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:59 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:59.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:02:59 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:59.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:59 vm07.local ceph-mon[56438]: from='client.14688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:02:59.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:59 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:59.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:02:59 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:02:59.880 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm06.rbqjdy vm06 *:2999,9999 stopped 0s ago 27s - - 2026-03-10T11:03:00.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:00 vm06.local ceph-mon[49534]: from='client.14692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:00 vm06.local ceph-mon[49534]: pgmap v76: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 938 B/s wr, 0 op/s 2026-03-10T11:03:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:00 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:00 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:00 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:03:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:00 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:03:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:00 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:00 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:03:00.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:00 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:00.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:00 vm07.local ceph-mon[56438]: from='client.14692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:00.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:00 vm07.local 
ceph-mon[56438]: pgmap v76: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 938 B/s wr, 0 op/s 2026-03-10T11:03:00.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:00 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:00.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:00 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:00.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:00 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:03:00.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:00 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:03:00.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:00 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:00.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:00 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:03:00.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:00 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:01.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:01 vm06.local ceph-mon[49534]: from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:01.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:01 vm07.local ceph-mon[56438]: from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:02.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:02 vm07.local ceph-mon[56438]: pgmap v77: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1023 B/s wr, 0 op/s 2026-03-10T11:03:02.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:02 vm06.local ceph-mon[49534]: pgmap v77: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1023 B/s wr, 0 op/s 2026-03-10T11:03:04.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:04 vm07.local ceph-mon[56438]: pgmap v78: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1023 B/s wr, 0 op/s 2026-03-10T11:03:04.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:04 vm06.local ceph-mon[49534]: pgmap v78: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1023 B/s wr, 0 op/s 2026-03-10T11:03:05.944 INFO:teuthology.orchestra.run.vm06.stdout:test 2026-03-10T11:03:06.168 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to start haproxy.nfs.foo.vm06.rbqjdy on host 'vm06' 2026-03-10T11:03:06.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:06 vm06.local ceph-mon[49534]: pgmap v79: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 1023 B/s wr, 0 op/s 2026-03-10T11:03:06.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:06 vm07.local ceph-mon[56438]: pgmap v79: 97 pgs: 97 active+clean; 460 KiB 
data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 1023 B/s wr, 0 op/s 2026-03-10T11:03:07.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:07 vm07.local ceph-mon[56438]: from='client.14698 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.nfs.foo.vm06.rbqjdy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:07.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:07 vm07.local ceph-mon[56438]: Schedule start daemon haproxy.nfs.foo.vm06.rbqjdy 2026-03-10T11:03:07.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:07 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:07.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:07 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:07.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:07 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:03:07.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:07 vm07.local ceph-mon[56438]: from='client.14702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:07.165 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:07 vm07.local ceph-mon[56438]: pgmap v80: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1023 B/s wr, 0 op/s 2026-03-10T11:03:07.426 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:07 vm06.local ceph-mon[49534]: from='client.14698 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.nfs.foo.vm06.rbqjdy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:07.426 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:07 vm06.local ceph-mon[49534]: Schedule start daemon haproxy.nfs.foo.vm06.rbqjdy 2026-03-10T11:03:07.426 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:07 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:07.426 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:07 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:07.426 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:07 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:03:07.426 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:07 vm06.local ceph-mon[49534]: from='client.14702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:07.426 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:07 vm06.local ceph-mon[49534]: pgmap v80: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1023 B/s wr, 0 op/s 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='client.24423 
-' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:03:08.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:08 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.686 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.687 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.687 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: from='client.24423 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:08.687 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.687 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.687 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:03:08.687 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:03:08.687 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:08.687 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:03:08.687 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:08 vm06.local ceph-mon[49534]: 
from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:10.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:10 vm06.local ceph-mon[49534]: from='client.14710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:10.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:10 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:10.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:10 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:10.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:10 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:03:10.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:10 vm06.local ceph-mon[49534]: pgmap v81: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 2.0 KiB/s wr, 1 op/s 2026-03-10T11:03:10.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:10 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T11:03:10.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:10 vm07.local ceph-mon[56438]: from='client.14710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:10.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:10 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:10.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:10 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:10.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:10 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:03:10.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:10 vm07.local ceph-mon[56438]: pgmap v81: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 2.0 KiB/s wr, 1 op/s 2026-03-10T11:03:10.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:10 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T11:03:11.417 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm06.rbqjdy vm06 *:2999,9999 running (2s) 0s ago 39s 3644k - 2.3.17-d1c9119 e85424b0d443 c787689fcef0 2026-03-10T11:03:11.599 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to stop haproxy.nfs.foo.vm07.clltbz on host 'vm07' 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='client.24427 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' 
entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: pgmap v82: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.3 KiB/s wr, 1 op/s 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:03:11.599 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:11 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='client.24427 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: pgmap v82: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.3 KiB/s wr, 1 op/s 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' 
entity='mgr.vm06.luxohm' 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:03:11.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:11 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:12.592 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:12 vm07.local ceph-mon[56438]: from='client.14718 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:12.593 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:12 vm07.local ceph-mon[56438]: from='client.14722 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.nfs.foo.vm07.clltbz", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:12.593 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:12 vm07.local ceph-mon[56438]: Schedule stop daemon haproxy.nfs.foo.vm07.clltbz 2026-03-10T11:03:12.593 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:12 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:12.593 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:12 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:12.593 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:12 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:03:12.593 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:12 vm07.local ceph-mon[56438]: from='client.14726 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:12.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:12 vm06.local ceph-mon[49534]: from='client.14718 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:12.835 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:12 vm06.local ceph-mon[49534]: from='client.14722 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.nfs.foo.vm07.clltbz", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:12.835 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:12 vm06.local ceph-mon[49534]: Schedule stop daemon haproxy.nfs.foo.vm07.clltbz 2026-03-10T11:03:12.835 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:12 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:12.835 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:12 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:12.835 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:12 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:03:12.835 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:12 vm06.local ceph-mon[49534]: from='client.14726 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:14.357 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: pgmap v83: 97 
pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.2 KiB/s wr, 1 op/s 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='client.14730 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:03:14.358 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:14 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: pgmap v83: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.2 KiB/s wr, 1 op/s 2026-03-10T11:03:14.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='client.14730 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:14.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-10T11:03:14.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:03:14.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:14.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:03:14.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:14 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:15.627 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:15 vm06.local ceph-mon[49534]: from='client.14734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:15.627 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:15 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:15.627 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:15 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:15.627 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:15 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:03:15.627 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:15 vm06.local ceph-mon[49534]: pgmap v84: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 1.2 KiB/s wr, 1 op/s 2026-03-10T11:03:15.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:15 vm07.local ceph-mon[56438]: from='client.14734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:15.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:15 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:15.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:15 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:15.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:15 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T11:03:15.666 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:15 vm07.local ceph-mon[56438]: pgmap v84: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 1.2 KiB/s wr, 1 op/s 2026-03-10T11:03:16.566 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm07.clltbz vm07 *:2999,9999 stopped 1s ago 40s - - 2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='client.14738 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 
2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:16.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:16 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.571 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm06.rbqjdy
2026-03-10T11:03:16.774 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to start haproxy.nfs.foo.vm07.clltbz on host 'vm07'
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='client.14738 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:16.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:16 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:17.959 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:17 vm06.local ceph-mon[49534]: from='client.14742 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:17.959 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:17 vm06.local ceph-mon[49534]: from='client.14746 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.nfs.foo.vm07.clltbz", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:17.959 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:17 vm06.local ceph-mon[49534]: Schedule start daemon haproxy.nfs.foo.vm07.clltbz
2026-03-10T11:03:17.959 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:17 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:17.959 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:17 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:17.959 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:17 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:17.959 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:17 vm06.local ceph-mon[49534]: pgmap v85: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.2 KiB/s wr, 1 op/s
2026-03-10T11:03:17.959 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:17 vm06.local ceph-mon[49534]: from='client.14750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:18.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:17 vm07.local ceph-mon[56438]: from='client.14742 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:18.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:17 vm07.local ceph-mon[56438]: from='client.14746 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.nfs.foo.vm07.clltbz", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:18.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:17 vm07.local ceph-mon[56438]: Schedule start daemon haproxy.nfs.foo.vm07.clltbz
2026-03-10T11:03:18.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:17 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:18.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:17 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:18.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:17 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:18.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:17 vm07.local ceph-mon[56438]: pgmap v85: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.2 KiB/s wr, 1 op/s
2026-03-10T11:03:18.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:17 vm07.local ceph-mon[56438]: from='client.14750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='client.14754 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: pgmap v86: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 2.4 KiB/s wr, 1 op/s
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:19 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='client.14754 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: pgmap v86: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 2.4 KiB/s wr, 1 op/s
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:20.303 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:19 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:21.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:20 vm07.local ceph-mon[56438]: from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:21.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:20 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:21.166 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:20 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:21.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:20 vm06.local ceph-mon[49534]: from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:21.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:20 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:21.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:20 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:21.791 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm07.clltbz vm07 *:2999,9999 running (2s) 0s ago 46s 3653k - 2.3.17-d1c9119 e85424b0d443 80ca68874d21
2026-03-10T11:03:21.945 INFO:teuthology.run_tasks:Running task vip.exec...
2026-03-10T11:03:21.947 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm06.local
2026-03-10T11:03:21.947 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'echo "Check with $(hostname) ganesha(s) down..."
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> for c in `systemctl | grep ceph- | grep @nfs | awk '"'"'{print $1}'"'"'`; do
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> cid=`echo $c | sed '"'"'s/@/-/'"'"'`
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> id=`echo $c | cut -d @ -f 2 | sed '"'"'s/.service$//'"'"'`
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> echo "Removing daemon $id fsid $fsid..."
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:>
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> echo "Waking up cephadm..."
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:>
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> while ! timeout 1 cat /mnt/foo/testfile ; do true ; done
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> echo "Mount is back!"
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> done
2026-03-10T11:03:21.948 DEBUG:teuthology.orchestra.run.vm06:> '
2026-03-10T11:03:21.982 INFO:teuthology.orchestra.run.vm06.stderr:++ hostname
2026-03-10T11:03:21.982 INFO:teuthology.orchestra.run.vm06.stderr:+ echo 'Check with vm06.local ganesha(s) down...'
2026-03-10T11:03:21.983 INFO:teuthology.orchestra.run.vm06.stdout:Check with vm06.local ganesha(s) down...
2026-03-10T11:03:21.983 INFO:teuthology.orchestra.run.vm06.stderr:++ awk '{print $1}'
2026-03-10T11:03:21.983 INFO:teuthology.orchestra.run.vm06.stderr:++ grep @nfs
2026-03-10T11:03:21.984 INFO:teuthology.orchestra.run.vm06.stderr:++ grep ceph-
2026-03-10T11:03:21.985 INFO:teuthology.orchestra.run.vm06.stderr:++ systemctl
2026-03-10T11:03:21.993 INFO:teuthology.orchestra.run.vm06.stderr:+ for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`
2026-03-10T11:03:21.998 INFO:teuthology.orchestra.run.vm06.stderr:++ echo ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@nfs.foo.0.0.vm06.gikzfj.service
2026-03-10T11:03:22.000 INFO:teuthology.orchestra.run.vm06.stderr:++ sed s/@/-/
2026-03-10T11:03:22.000 INFO:teuthology.orchestra.run.vm06.stderr:+ cid=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-nfs.foo.0.0.vm06.gikzfj.service
2026-03-10T11:03:22.000 INFO:teuthology.orchestra.run.vm06.stderr:++ echo ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@nfs.foo.0.0.vm06.gikzfj.service
2026-03-10T11:03:22.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:21 vm06.local ceph-mon[49534]: from='client.14762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:22.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:21 vm06.local ceph-mon[49534]: pgmap v87: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.5 KiB/s wr, 1 op/s
2026-03-10T11:03:22.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:21 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:22.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:21 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:22.004 INFO:teuthology.orchestra.run.vm06.stderr:++ sed 's/.service$//'
2026-03-10T11:03:22.004 INFO:teuthology.orchestra.run.vm06.stderr:++ cut -d @ -f 2
2026-03-10T11:03:22.007 INFO:teuthology.orchestra.run.vm06.stderr:+ id=nfs.foo.0.0.vm06.gikzfj
2026-03-10T11:03:22.008 INFO:teuthology.orchestra.run.vm06.stderr:++ cut -d @ -f 1
2026-03-10T11:03:22.009 INFO:teuthology.orchestra.run.vm06.stderr:++ echo ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@nfs.foo.0.0.vm06.gikzfj.service
2026-03-10T11:03:22.009 INFO:teuthology.orchestra.run.vm06.stderr:++ cut -d - -f 2-
2026-03-10T11:03:22.010 INFO:teuthology.orchestra.run.vm06.stderr:+ fsid=2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:03:22.010 INFO:teuthology.orchestra.run.vm06.stderr:+ echo 'Removing daemon nfs.foo.0.0.vm06.gikzfj fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240...'
2026-03-10T11:03:22.010 INFO:teuthology.orchestra.run.vm06.stdout:Removing daemon nfs.foo.0.0.vm06.gikzfj fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240...
2026-03-10T11:03:22.010 INFO:teuthology.orchestra.run.vm06.stderr:+ sudo /home/ubuntu/cephtest/cephadm rm-daemon --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 --name nfs.foo.0.0.vm06.gikzfj
2026-03-10T11:03:22.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:21 vm07.local ceph-mon[56438]: from='client.14762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:22.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:21 vm07.local ceph-mon[56438]: pgmap v87: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.5 KiB/s wr, 1 op/s
2026-03-10T11:03:22.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:21 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:22.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:21 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:22.845 INFO:teuthology.orchestra.run.vm06.stdout:Waking up cephadm...
2026-03-10T11:03:22.845 INFO:teuthology.orchestra.run.vm06.stderr:+ echo 'Waking up cephadm...'
2026-03-10T11:03:22.846 INFO:teuthology.orchestra.run.vm06.stderr:+ sudo /home/ubuntu/cephtest/cephadm shell -- ceph orch ps --refresh
2026-03-10T11:03:23.026 INFO:teuthology.orchestra.run.vm06.stderr:Inferring fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:03:23.077 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:03:23.180 INFO:teuthology.orchestra.run.vm06.stderr:Using ceph image with id '654f31e6858e' and tag 'e911bdebe5c8faa3800735d1568fcdca65db60df' created on 2026-02-25 18:57:17 +0000 UTC
2026-03-10T11:03:23.180 INFO:teuthology.orchestra.run.vm06.stderr:quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc
2026-03-10T11:03:23.205 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:22 vm06.local ceph-mon[49534]: from='client.14766 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:23.205 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:23.205 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:23.205 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:23.205 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:23.205 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:22 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:23.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:22 vm07.local ceph-mon[56438]: from='client.14766 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:23.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:22 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:alertmanager.vm06 vm06 *:9093,9094 running (115s) 1s ago 2m 24.7M - 0.25.0 c8568f914cd2 a807579b2855
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm06 vm06 *:9926 running (2m) 1s ago 2m 9139k - 19.2.3-678-ge911bdeb 654f31e6858e f04afec3b0fe
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm07 vm07 *:9926 running (2m) 2s ago 2m 6660k - 19.2.3-678-ge911bdeb 654f31e6858e 43cdb28ee734
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm06 vm06 running (2m) 1s ago 2m 7662k - 19.2.3-678-ge911bdeb 654f31e6858e cbe6bed3aad1
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm07 vm07 running (2m) 2s ago 2m 7650k - 19.2.3-678-ge911bdeb 654f31e6858e 03f155afca45
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:grafana.vm06 vm06 *:3000 running (114s) 1s ago 2m 83.9M - 10.4.0 c8b91775d855 7b6b7920b655
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm06.rbqjdy vm06 *:2999,9999 running (14s) 1s ago 51s 3778k - 2.3.17-d1c9119 e85424b0d443 c787689fcef0
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm07.clltbz vm07 *:2999,9999 running (3s) 2s ago 47s 3653k - 2.3.17-d1c9119 e85424b0d443 80ca68874d21
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:keepalived.nfs.foo.vm06.ewpmce vm06 running (43s) 1s ago 42s 2399k - 2.2.4 4a3a1ff181d9 d2007caba65b
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:keepalived.nfs.foo.vm07.hlkpmk vm07 running (38s) 2s ago 38s 2403k - 2.2.4 4a3a1ff181d9 fdaf5f3ccc33
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:mds.foofs.vm06.yhlcqi vm06 running (58s) 1s ago 58s 18.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6fec54b6ddfc
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:mds.foofs.vm07.yrhofr vm07 running (59s) 2s ago 59s 17.4M - 19.2.3-678-ge911bdeb 654f31e6858e 5fdb98239c7e
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm06.luxohm vm06 *:9283,8765,8443 running (3m) 1s ago 3m 567M - 19.2.3-678-ge911bdeb 654f31e6858e f8029ae232b2
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm07.rgmael vm07 *:8443,9283,8765 running (2m) 2s ago 2m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 15f8f374cde9
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm06 vm06 running (3m) 1s ago 3m 61.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5c495012543a
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm07 vm07 running (2m) 2s ago 2m 39.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d9b06c633c41
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:nfs.foo.0.0.vm06.gikzfj vm06 *:12999 running (56s) 1s ago 56s 54.8M - 5.9 654f31e6858e 89915e94a5a5
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm06 vm06 *:9100 running (2m) 1s ago 2m 9701k - 1.7.0 72c9c2088986 89fe88f92fef
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm07 vm07 *:9100 running (2m) 2s ago 2m 9537k - 1.7.0 72c9c2088986 b301c2e37cef
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:osd.0 vm07 running (91s) 2s ago 91s 64.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e cadd21134cd1
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:osd.1 vm06 running (90s) 1s ago 90s 66.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 1b78317a4f01
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:osd.2 vm07 running (89s) 2s ago 89s 42.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e abfe9471d7a1
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:osd.3 vm06 running (87s) 1s ago 87s 66.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e37bdbcfe4d7
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:osd.4 vm07 running (86s) 2s ago 86s 66.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6161db3e3079
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:osd.5 vm06 running (85s) 1s ago 84s 62.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9b755fe7792b
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:osd.6 vm07 running (84s) 2s ago 84s 40.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 2588369018ea
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:osd.7 vm06 running (82s) 1s ago 82s 44.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e c83d191c7d41
2026-03-10T11:03:23.439 INFO:teuthology.orchestra.run.vm06.stdout:prometheus.vm06 vm06 *:9095 running (35s) 1s ago 2m 34.1M - 2.51.0 1d3b7f56885b c856e73a40fe
2026-03-10T11:03:23.643 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:24.032 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:23 vm07.local ceph-mon[56438]: pgmap v88: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.3 KiB/s wr, 1 op/s
2026-03-10T11:03:24.032 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:24.032 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:23 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:03:24.077 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:23 vm06.local ceph-mon[49534]: pgmap v88: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.3 KiB/s wr, 1 op/s
2026-03-10T11:03:24.077 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:24.077 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:23 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:03:24.645 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:24.645 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:25.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:24 vm06.local ceph-mon[49534]: from='client.14770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:25.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:25.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:24 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:25.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:24 vm07.local ceph-mon[56438]: from='client.14770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:25.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:25.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:24 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:25.650 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:25.650 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: pgmap v89: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 1.5 KiB/s wr, 1 op/s
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth rm", "entity": "client.nfs.foo.0.0.vm06.gikzfj"}]: dispatch
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth rm", "entity": "client.nfs.foo.0.0.vm06.gikzfj"}]': finished
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm06.aoilme", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm06.aoilme", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch
2026-03-10T11:03:26.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished
2026-03-10T11:03:26.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm06.aoilme-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T11:03:26.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm06.aoilme-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T11:03:26.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:25 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: pgmap v89: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 1.5 KiB/s wr, 1 op/s
2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170'
entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth rm", "entity": "client.nfs.foo.0.0.vm06.gikzfj"}]: dispatch 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth rm", "entity": "client.nfs.foo.0.0.vm06.gikzfj"}]': finished 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm06.aoilme", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm06.aoilme", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 
2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm06.aoilme-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm06.aoilme-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T11:03:26.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:25 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:26.654 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:26.654 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:26.965 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: Fencing old nfs.foo.0.0.vm06.gikzfj
2026-03-10T11:03:26.965 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: Creating key for client.nfs.foo.0.1.vm06.aoilme
2026-03-10T11:03:26.965 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: Ensuring nfs.foo.0 is in the ganesha grace table
2026-03-10T11:03:26.965 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: Rados config object exists: conf-nfs.foo
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: Creating key for client.nfs.foo.0.1.vm06.aoilme-rgw
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: Bind address in nfs.foo.0.1.vm06.aoilme's ganesha conf is defaulting to empty
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: Deploying daemon nfs.foo.0.1.vm06.aoilme on vm06
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: Evicting (and blocklisting) client session 24377 (192.168.123.106:0/112614629)
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: from='mds.? [v2:192.168.123.107:6832/500950013,v1:192.168.123.107:6833/500950013]' entity='mds.foofs.vm07.yrhofr' cmd=[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.106:0/112614629"}]: dispatch
2026-03-10T11:03:27.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:26 vm06.local ceph-mon[49534]: from='mds.? ' entity='mds.foofs.vm07.yrhofr' cmd=[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.106:0/112614629"}]: dispatch
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: Fencing old nfs.foo.0.0.vm06.gikzfj
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: Creating key for client.nfs.foo.0.1.vm06.aoilme
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: Ensuring nfs.foo.0 is in the ganesha grace table
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: Rados config object exists: conf-nfs.foo
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: Creating key for client.nfs.foo.0.1.vm06.aoilme-rgw
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: Bind address in nfs.foo.0.1.vm06.aoilme's ganesha conf is defaulting to empty
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: Deploying daemon nfs.foo.0.1.vm06.aoilme on vm06
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: Evicting (and blocklisting) client session 24377 (192.168.123.106:0/112614629)
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: from='mds.? [v2:192.168.123.107:6832/500950013,v1:192.168.123.107:6833/500950013]' entity='mds.foofs.vm07.yrhofr' cmd=[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.106:0/112614629"}]: dispatch
2026-03-10T11:03:27.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:26 vm07.local ceph-mon[56438]: from='mds.? ' entity='mds.foofs.vm07.yrhofr' cmd=[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.106:0/112614629"}]: dispatch
2026-03-10T11:03:27.655 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:27.655 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:28.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:28 vm06.local ceph-mon[49534]: pgmap v90: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 1.5 KiB/s wr, 1 op/s
2026-03-10T11:03:28.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:28 vm06.local ceph-mon[49534]: from='mds.? ' entity='mds.foofs.vm07.yrhofr' cmd='[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.106:0/112614629"}]': finished
2026-03-10T11:03:28.294 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:28 vm06.local ceph-mon[49534]: osdmap e34: 8 total, 8 up, 8 in
2026-03-10T11:03:28.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:28 vm07.local ceph-mon[56438]: pgmap v90: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 1.5 KiB/s wr, 1 op/s
2026-03-10T11:03:28.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:28 vm07.local ceph-mon[56438]: from='mds.? ' entity='mds.foofs.vm07.yrhofr' cmd='[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.106:0/112614629"}]': finished
2026-03-10T11:03:28.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:28 vm07.local ceph-mon[56438]: osdmap e34: 8 total, 8 up, 8 in
2026-03-10T11:03:28.660 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:28.660 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:29.665 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:29.665 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:29.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:29.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:29.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:29.752 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: Reconfiguring haproxy.nfs.foo.vm06.rbqjdy (dependencies changed)...
2026-03-10T11:03:29.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: Reconfiguring daemon haproxy.nfs.foo.vm06.rbqjdy on vm06
2026-03-10T11:03:29.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: pgmap v92: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 818 B/s rd, 511 B/s wr, 1 op/s
2026-03-10T11:03:29.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.753 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:29 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: Reconfiguring haproxy.nfs.foo.vm06.rbqjdy (dependencies changed)...
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: Reconfiguring daemon haproxy.nfs.foo.vm06.rbqjdy on vm06
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: pgmap v92: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 818 B/s rd, 511 B/s wr, 1 op/s
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:29.776 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:29 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:30.666 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:30.666 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:30.677 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:30 vm06.local ceph-mon[49534]: Reconfiguring haproxy.nfs.foo.vm07.clltbz (dependencies changed)...
2026-03-10T11:03:30.677 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:30 vm06.local ceph-mon[49534]: Reconfiguring daemon haproxy.nfs.foo.vm07.clltbz on vm07
2026-03-10T11:03:30.677 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:30 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:30.677 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:30 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:30.677 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:30 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:30.677 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:30 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:30.818 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:30 vm07.local ceph-mon[56438]: Reconfiguring haproxy.nfs.foo.vm07.clltbz (dependencies changed)...
2026-03-10T11:03:30.818 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:30 vm07.local ceph-mon[56438]: Reconfiguring daemon haproxy.nfs.foo.vm07.clltbz on vm07
2026-03-10T11:03:30.818 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:30 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:30.818 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:30 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:30.818 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:30 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:30.818 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:30 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T11:03:31.668 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:31.668 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:31.671 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:31 vm06.local ceph-mon[49534]: pgmap v93: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 716 B/s wr, 2 op/s
2026-03-10T11:03:31.671 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:31 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:31.671 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:31 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:31.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:31 vm07.local ceph-mon[56438]: pgmap v93: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 716 B/s wr, 2 op/s
2026-03-10T11:03:31.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:31 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:31.916 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:31 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:32.669 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:32.669 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:33.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:33 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:33.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:33 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:33.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:33 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:33.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:33 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:33.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:33 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:33.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:33 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:33.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:33 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:33.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:33 vm06.local ceph-mon[49534]: pgmap v94: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1.2 KiB/s wr, 3 op/s
2026-03-10T11:03:33.507 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:33 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:33.507 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:33 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:33.507 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:33 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T11:03:33.507 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:33 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T11:03:33.507 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:33 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:33.507 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:33 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T11:03:33.507 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:33 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm'
2026-03-10T11:03:33.507 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:33 vm07.local ceph-mon[56438]: pgmap v94: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1.2 KiB/s wr, 3 op/s
2026-03-10T11:03:33.671 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:33.671 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:34.672 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:34.675 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:35.673 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:35.673 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:36.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:36 vm07.local ceph-mon[56438]: pgmap v95: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1023 B/s wr, 2 op/s
2026-03-10T11:03:36.424 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:36 vm06.local ceph-mon[49534]: pgmap v95: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1023 B/s wr, 2 op/s
2026-03-10T11:03:36.674 INFO:teuthology.orchestra.run.vm06.stderr:+ true
2026-03-10T11:03:36.674 INFO:teuthology.orchestra.run.vm06.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-10T11:03:37.396 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm07.clltbz
2026-03-10T11:03:37.396 INFO:teuthology.orchestra.run.vm06.stdout:Mount is back!
2026-03-10T11:03:37.396 INFO:teuthology.orchestra.run.vm06.stderr:+ echo 'Mount is back!'
2026-03-10T11:03:37.397 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:37 vm06.local ceph-mon[49534]: pgmap v96: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1023 B/s wr, 2 op/s
2026-03-10T11:03:37.398 INFO:tasks.vip:Running commands on role host.b host ubuntu@vm07.local
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'echo "Check with $(hostname) ganesha(s) down..."
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> for c in `systemctl | grep ceph- | grep @nfs | awk '"'"'{print $1}'"'"'`; do
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> cid=`echo $c | sed '"'"'s/@/-/'"'"'`
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> id=`echo $c | cut -d @ -f 2 | sed '"'"'s/.service$//'"'"'`
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> echo "Removing daemon $id fsid $fsid..."
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:>
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> echo "Waking up cephadm..."
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:>
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> while ! timeout 1 cat /mnt/foo/testfile ; do true ; done
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> echo "Mount is back!"
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> done
2026-03-10T11:03:37.398 DEBUG:teuthology.orchestra.run.vm07:> '
2026-03-10T11:03:37.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:37 vm07.local ceph-mon[56438]: pgmap v96: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1023 B/s wr, 2 op/s
2026-03-10T11:03:37.427 INFO:teuthology.orchestra.run.vm07.stderr:++ hostname
2026-03-10T11:03:37.428 INFO:teuthology.orchestra.run.vm07.stderr:+ echo 'Check with vm07.local ganesha(s) down...'
2026-03-10T11:03:37.428 INFO:teuthology.orchestra.run.vm07.stdout:Check with vm07.local ganesha(s) down...
2026-03-10T11:03:37.428 INFO:teuthology.orchestra.run.vm07.stderr:++ systemctl
2026-03-10T11:03:37.428 INFO:teuthology.orchestra.run.vm07.stderr:++ grep @nfs
2026-03-10T11:03:37.429 INFO:teuthology.orchestra.run.vm07.stderr:++ grep ceph-
2026-03-10T11:03:37.432 INFO:teuthology.orchestra.run.vm07.stderr:++ awk '{print $1}'
2026-03-10T11:03:37.436 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-03-10T11:03:37.439 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm06.local
2026-03-10T11:03:37.439 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"''
2026-03-10T11:03:37.630 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:03:37.731 INFO:teuthology.orchestra.run.vm06.stdout:167 167
2026-03-10T11:03:37.882 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch status'
2026-03-10T11:03:38.076 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:03:38.335 INFO:teuthology.orchestra.run.vm06.stdout:Backend: cephadm
2026-03-10T11:03:38.335 INFO:teuthology.orchestra.run.vm06.stdout:Available: Yes
2026-03-10T11:03:38.335 INFO:teuthology.orchestra.run.vm06.stdout:Paused: No
2026-03-10T11:03:38.519 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch ps'
2026-03-10T11:03:38.699 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:alertmanager.vm06 vm06 *:9093,9094 running (2m) 6s ago 2m 24.7M - 0.25.0 c8568f914cd2 a807579b2855
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm06 vm06 *:9926 running (2m) 6s ago 2m 9206k - 19.2.3-678-ge911bdeb 654f31e6858e f04afec3b0fe
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm07 vm07 *:9926 running (2m) 7s ago 2m 6660k - 19.2.3-678-ge911bdeb 654f31e6858e 43cdb28ee734
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm06 vm06 running (2m) 6s ago 2m 7662k - 19.2.3-678-ge911bdeb 654f31e6858e cbe6bed3aad1
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm07 vm07 running (2m) 7s ago 2m 7650k - 19.2.3-678-ge911bdeb 654f31e6858e 03f155afca45
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:grafana.vm06 vm06 *:3000 running (2m) 6s ago 2m 75.3M - 10.4.0 c8b91775d855 7b6b7920b655
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm06.rbqjdy vm06 *:2999,9999 running (9s) 6s ago 67s 3665k - 2.3.17-d1c9119 e85424b0d443 f227316ca3d4
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.nfs.foo.vm07.clltbz vm07 *:2999,9999 running (8s) 7s ago 63s 3653k - 2.3.17-d1c9119 e85424b0d443 93efad3813dc
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:keepalived.nfs.foo.vm06.ewpmce vm06 running (58s) 6s ago 58s 2399k - 2.2.4 4a3a1ff181d9 d2007caba65b
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:keepalived.nfs.foo.vm07.hlkpmk vm07 running (54s) 7s ago 54s 2403k - 2.2.4 4a3a1ff181d9 fdaf5f3ccc33
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:mds.foofs.vm06.yhlcqi vm06 running (74s) 6s ago 74s 18.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6fec54b6ddfc
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:mds.foofs.vm07.yrhofr vm07 running (75s) 7s ago 75s 17.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5fdb98239c7e
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm06.luxohm vm06 *:9283,8765,8443 running (3m) 6s ago 3m 569M - 19.2.3-678-ge911bdeb 654f31e6858e f8029ae232b2
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm07.rgmael vm07 *:8443,9283,8765 running (2m) 7s ago 2m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 15f8f374cde9
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm06 vm06 running (3m) 6s ago 3m 50.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5c495012543a
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm07 vm07 running (2m) 7s ago 2m 40.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d9b06c633c41
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:nfs.foo.0.1.vm06.aoilme vm06 *:12999 running (12s) 6s ago 12s 53.1M - 5.9 654f31e6858e 59a30bb69801
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm06 vm06 *:9100 running (2m) 6s ago 2m 9713k - 1.7.0 72c9c2088986 89fe88f92fef
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm07 vm07 *:9100 running (2m) 7s ago 2m 9554k - 1.7.0 72c9c2088986 b301c2e37cef
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:osd.0 vm07 running (107s) 7s ago 107s 65.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e cadd21134cd1
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:osd.1 vm06 running (106s) 6s ago 106s 66.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 1b78317a4f01
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:osd.2 vm07 running (105s) 7s ago 105s 42.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e abfe9471d7a1
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:osd.3 vm06 running (103s) 6s ago 103s 66.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e37bdbcfe4d7
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:osd.4 vm07 running (102s) 7s ago 102s 66.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6161db3e3079
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:osd.5 vm06 running (100s) 6s ago 100s 62.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9b755fe7792b
2026-03-10T11:03:38.965 INFO:teuthology.orchestra.run.vm06.stdout:osd.6 vm07 running (99s) 7s ago 99s 41.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 2588369018ea
2026-03-10T11:03:38.966 INFO:teuthology.orchestra.run.vm06.stdout:osd.7 vm06 running (97s) 6s ago 97s 43.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e c83d191c7d41
2026-03-10T11:03:38.966 INFO:teuthology.orchestra.run.vm06.stdout:prometheus.vm06 vm06 *:9095 running (50s) 6s ago 2m 36.2M - 2.51.0 1d3b7f56885b c856e73a40fe
2026-03-10T11:03:39.003 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:39 vm06.local ceph-mon[49534]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:03:39.116 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch ls'
2026-03-10T11:03:39.295 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:03:39.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:39 vm07.local ceph-mon[56438]: from='mgr.14217 192.168.123.106:0/3840909170' entity='mgr.vm06.luxohm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T11:03:39.541 INFO:teuthology.orchestra.run.vm06.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT
2026-03-10T11:03:39.541 INFO:teuthology.orchestra.run.vm06.stdout:alertmanager ?:9093,9094 1/1 7s ago 3m count:1
2026-03-10T11:03:39.541 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter ?:9926 2/2 8s ago 3m *
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:crash 2/2 8s ago 3m *
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:grafana ?:3000 1/1 7s ago 3m count:1
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:ingress.nfs.foo 12.12.1.106:2999,9999 4/4 8s ago 75s count:2
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:mds.foofs 2/2 8s ago 77s count:2
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:mgr 2/2 8s ago 3m count:2
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:mon 2/2 8s ago 2m vm06:192.168.123.106=vm06;vm07:192.168.123.107=vm07;count:2
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:nfs.foo ?:12999 1/1 7s ago 75s count:1
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter ?:9100 2/2 8s ago 3m *
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:osd.all-available-devices 8 8s ago 2m *
2026-03-10T11:03:39.542 INFO:teuthology.orchestra.run.vm06.stdout:prometheus ?:9095 1/1 7s ago 3m count:1
2026-03-10T11:03:39.714 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch host ls'
2026-03-10T11:03:39.891 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:03:40.179 INFO:teuthology.orchestra.run.vm06.stdout:HOST ADDR LABELS STATUS
2026-03-10T11:03:40.179 INFO:teuthology.orchestra.run.vm06.stdout:vm06 192.168.123.106
2026-03-10T11:03:40.179 INFO:teuthology.orchestra.run.vm06.stdout:vm07 192.168.123.107
2026-03-10T11:03:40.179 INFO:teuthology.orchestra.run.vm06.stdout:2 hosts in cluster
2026-03-10T11:03:40.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:40 vm06.local ceph-mon[49534]: from='client.14794 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:40.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:40 vm06.local ceph-mon[49534]: pgmap v97: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 703 B/s rd, 791 B/s wr, 1 op/s
2026-03-10T11:03:40.253 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:40 vm06.local ceph-mon[49534]: from='client.14798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:40.337 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch device ls'
2026-03-10T11:03:40.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:40 vm07.local ceph-mon[56438]: from='client.14794 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:40.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:40 vm07.local ceph-mon[56438]: pgmap v97: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 703 B/s rd, 791 B/s wr, 1 op/s
2026-03-10T11:03:40.416 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:40 vm07.local ceph-mon[56438]: from='client.14798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:40.527 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:03:40.775 INFO:teuthology.orchestra.run.vm06.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS
2026-03-10T11:03:40.775 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 77s ago Has a FileSystem, Insufficient space (<5GB)
2026-03-10T11:03:40.775 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdb hdd DWNBRSTVMM06001 20.0G No 77s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T11:03:40.775 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdc hdd DWNBRSTVMM06002 20.0G No 77s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T11:03:40.775 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdd hdd DWNBRSTVMM06003 20.0G No 77s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T11:03:40.776 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vde hdd DWNBRSTVMM06004 20.0G No 77s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T11:03:40.776 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 78s ago Has a FileSystem, Insufficient space (<5GB)
2026-03-10T11:03:40.776 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vdb hdd DWNBRSTVMM07001 20.0G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T11:03:40.776 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vdc hdd DWNBRSTVMM07002 20.0G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T11:03:40.776 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vdd hdd DWNBRSTVMM07003 20.0G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T11:03:40.776 INFO:teuthology.orchestra.run.vm06.stdout:vm07 /dev/vde hdd DWNBRSTVMM07004 20.0G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T11:03:40.929 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- bash -c 'ceph orch ls | grep '"'"'^osd.all-available-devices '"'"''
2026-03-10T11:03:41.109 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:03:41.153 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:41 vm06.local ceph-mon[49534]: from='client.14802 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:41.377 INFO:teuthology.orchestra.run.vm06.stdout:osd.all-available-devices 8 9s ago 2m *
2026-03-10T11:03:41.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:41 vm07.local ceph-mon[56438]: from='client.14802 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:41.547 DEBUG:teuthology.run_tasks:Unwinding manager vip
2026-03-10T11:03:41.550 INFO:tasks.vip:Removing 12.12.0.106 (and any VIPs) on vm06.local iface eth0...
2026-03-10T11:03:41.550 DEBUG:teuthology.orchestra.run.vm06:> sudo ip addr del 12.12.0.106/22 dev eth0
2026-03-10T11:03:41.574 DEBUG:teuthology.orchestra.run.vm06:> sudo ip addr del 12.12.1.106/22 dev eth0
2026-03-10T11:03:41.637 INFO:tasks.vip:Removing 12.12.0.107 (and any VIPs) on vm07.local iface eth0...
2026-03-10T11:03:41.637 DEBUG:teuthology.orchestra.run.vm07:> sudo ip addr del 12.12.0.107/22 dev eth0
2026-03-10T11:03:41.666 DEBUG:teuthology.orchestra.run.vm07:> sudo ip addr del 12.12.1.106/22 dev eth0
2026-03-10T11:03:41.732 INFO:teuthology.orchestra.run.vm07.stderr:Error: ipv4: Address not found.
2026-03-10T11:03:41.733 DEBUG:teuthology.orchestra.run:got remote process result: 2
2026-03-10T11:03:41.733 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-03-10T11:03:41.736 INFO:tasks.cephadm:Teardown begin
2026-03-10T11:03:41.736 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T11:03:41.761 DEBUG:teuthology.orchestra.run.vm07:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T11:03:41.798 INFO:tasks.cephadm:Disabling cephadm mgr module
2026-03-10T11:03:41.799 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 -- ceph mgr module disable cephadm
2026-03-10T11:03:41.981 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/mon.vm06/config
2026-03-10T11:03:42.004 INFO:teuthology.orchestra.run.vm06.stderr:Error: statfs /etc/ceph/ceph.client.admin.keyring: no such file or directory
2026-03-10T11:03:42.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:42 vm06.local ceph-mon[49534]: from='client.14806 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:42.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:42 vm06.local ceph-mon[49534]: from='client.14810 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:42.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:42 vm06.local ceph-mon[49534]: pgmap v98: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1.1 KiB/s wr, 2 op/s
2026-03-10T11:03:42.031 DEBUG:teuthology.orchestra.run:got remote process result: 125
2026-03-10T11:03:42.032 INFO:tasks.cephadm:Cleaning up testdir ceph.* files...
2026-03-10T11:03:42.032 DEBUG:teuthology.orchestra.run.vm06:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-10T11:03:42.047 DEBUG:teuthology.orchestra.run.vm07:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-10T11:03:42.066 INFO:tasks.cephadm:Stopping all daemons...
2026-03-10T11:03:42.066 INFO:tasks.cephadm.mon.vm06:Stopping mon.vm06...
2026-03-10T11:03:42.066 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm06
2026-03-10T11:03:42.365 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:42 vm06.local systemd[1]: Stopping Ceph mon.vm06 for 2d4d1532-1c70-11f1-9ee5-8d2ac270c240...
2026-03-10T11:03:42.365 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:42 vm06.local ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06[49510]: 2026-03-10T11:03:42.203+0000 7f28c6626640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm06 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-10T11:03:42.365 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:42 vm06.local ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06[49510]: 2026-03-10T11:03:42.203+0000 7f28c6626640 -1 mon.vm06@0(leader) e2 *** Got Signal Terminated ***
2026-03-10T11:03:42.365 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 10 11:03:42 vm06.local podman[103249]: 2026-03-10 11:03:42.246748709 +0000 UTC m=+0.058604391 container died 5c495012543afbefb6c8ac026cbada1dd0dc5c380bdd599a02bc05bc659a39ba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm06, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.build-date=20260223)
2026-03-10T11:03:42.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:42 vm07.local ceph-mon[56438]: from='client.14806 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:42.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:42 vm07.local ceph-mon[56438]: from='client.14810 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T11:03:42.415 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:42 vm07.local ceph-mon[56438]: pgmap v98: 97 pgs: 97 active+clean; 485 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1.1 KiB/s wr, 2 op/s
2026-03-10T11:03:42.444 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm06.service'
2026-03-10T11:03:42.487 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-10T11:03:42.487 INFO:tasks.cephadm.mon.vm06:Stopped mon.vm06
2026-03-10T11:03:42.487 INFO:tasks.cephadm.mon.vm07:Stopping mon.vm07...
2026-03-10T11:03:42.487 DEBUG:teuthology.orchestra.run.vm07:> sudo systemctl stop ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm07
2026-03-10T11:03:42.764 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:42 vm07.local systemd[1]: Stopping Ceph mon.vm07 for 2d4d1532-1c70-11f1-9ee5-8d2ac270c240...
2026-03-10T11:03:42.764 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:42 vm07.local ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm07[56414]: 2026-03-10T11:03:42.583+0000 7f6d713e1640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm07 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-10T11:03:42.764 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 11:03:42 vm07.local ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240-mon-vm07[56414]: 2026-03-10T11:03:42.583+0000 7f6d713e1640 -1 mon.vm07@1(peon) e2 *** Got Signal Terminated ***
2026-03-10T11:03:42.953 DEBUG:teuthology.orchestra.run.vm07:> sudo pkill -f 'journalctl -f -n 0 -u ceph-2d4d1532-1c70-11f1-9ee5-8d2ac270c240@mon.vm07.service'
2026-03-10T11:03:42.995 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-10T11:03:42.995 INFO:tasks.cephadm.mon.vm07:Stopped mon.vm07
2026-03-10T11:03:42.995 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 --force --keep-logs
2026-03-10T11:03:43.121 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:04:24.840 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 --force --keep-logs
2026-03-10T11:04:24.985 INFO:teuthology.orchestra.run.vm07.stdout:Deleting cluster with fsid: 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:04:58.681 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T11:04:58.708 DEBUG:teuthology.orchestra.run.vm07:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T11:04:58.733 INFO:tasks.cephadm:Archiving crash dumps...
2026-03-10T11:04:58.733 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1008/remote/vm06/crash
2026-03-10T11:04:58.733 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/crash -- .
2026-03-10T11:04:58.777 INFO:teuthology.orchestra.run.vm06.stderr:tar: /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/crash: Cannot open: No such file or directory
2026-03-10T11:04:58.777 INFO:teuthology.orchestra.run.vm06.stderr:tar: Error is not recoverable: exiting now
2026-03-10T11:04:58.778 DEBUG:teuthology.misc:Transferring archived files from vm07:/var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1008/remote/vm07/crash
2026-03-10T11:04:58.778 DEBUG:teuthology.orchestra.run.vm07:> sudo tar c -f - -C /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/crash -- .
2026-03-10T11:04:58.805 INFO:teuthology.orchestra.run.vm07.stderr:tar: /var/lib/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/crash: Cannot open: No such file or directory
2026-03-10T11:04:58.806 INFO:teuthology.orchestra.run.vm07.stderr:tar: Error is not recoverable: exiting now
2026-03-10T11:04:58.807 INFO:tasks.cephadm:Checking cluster log for badness...
2026-03-10T11:04:58.807 DEBUG:teuthology.orchestra.run.vm06:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-03-10T11:04:58.848 INFO:tasks.cephadm:Compressing logs...
2026-03-10T11:04:58.848 DEBUG:teuthology.orchestra.run.vm06:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T11:04:58.891 DEBUG:teuthology.orchestra.run.vm07:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T11:04:58.917 INFO:teuthology.orchestra.run.vm06.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-10T11:04:58.917 INFO:teuthology.orchestra.run.vm06.stderr:‘/var/log/rbd-target-api’: No such file or directory
2026-03-10T11:04:58.918 INFO:teuthology.orchestra.run.vm07.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-10T11:04:58.918 INFO:teuthology.orchestra.run.vm07.stderr:‘/var/log/rbd-target-api’: No such file or directory
2026-03-10T11:04:58.919 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mon.vm06.log
2026-03-10T11:04:58.920 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-volume.log
2026-03-10T11:04:58.921 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/cephadm.log: 91.8% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-10T11:04:58.921 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-client.ceph-exporter.vm07.log
2026-03-10T11:04:58.922 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mgr.vm07.rgmael.log
2026-03-10T11:04:58.923 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-client.ceph-exporter.vm07.log: 27.8% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-client.ceph-exporter.vm07.log.gz
2026-03-10T11:04:58.924 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mon.vm06.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.log
2026-03-10T11:04:58.926 INFO:teuthology.orchestra.run.vm07.stderr: 95.7% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-volume.log.gz
2026-03-10T11:04:58.927 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mon.vm07.log
2026-03-10T11:04:58.927 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mgr.vm07.rgmael.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.audit.log
2026-03-10T11:04:58.929 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mon.vm07.log: 91.3% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mgr.vm07.rgmael.log.gz
2026-03-10T11:04:58.929 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.log
2026-03-10T11:04:58.930 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mgr.vm06.luxohm.log
2026-03-10T11:04:58.931 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.log: 91.6% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-10T11:04:58.931 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.audit.log: 91.2% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.audit.log.gz
2026-03-10T11:04:58.931 INFO:teuthology.orchestra.run.vm06.stderr: 84.5% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.log.gz
2026-03-10T11:04:58.931 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.cephadm.log
2026-03-10T11:04:58.931 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.audit.log
2026-03-10T11:04:58.932 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.log: 84.1% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.log.gz
2026-03-10T11:04:58.932 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.0.log
2026-03-10T11:04:58.932 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.cephadm.log: 82.9% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.cephadm.log.gz
2026-03-10T11:04:58.933 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.2.log
2026-03-10T11:04:58.938 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.0.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.4.log
2026-03-10T11:04:58.939 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mgr.vm06.luxohm.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.cephadm.log
2026-03-10T11:04:58.940 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.audit.log: 91.0% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.audit.log.gz
2026-03-10T11:04:58.942 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-volume.log
2026-03-10T11:04:58.943 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.cephadm.log: 83.3% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph.cephadm.log.gz
2026-03-10T11:04:58.949 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-client.ceph-exporter.vm06.log
2026-03-10T11:04:58.953 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.6.log
2026-03-10T11:04:58.953 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.1.log
2026-03-10T11:04:58.955 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-client.ceph-exporter.vm06.log: 92.7% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-client.ceph-exporter.vm06.log.gz
2026-03-10T11:04:58.957 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.3.log
2026-03-10T11:04:58.958 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.1.log: 95.8% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-volume.log.gz
2026-03-10T11:04:58.965 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mds.foofs.vm07.yrhofr.log
2026-03-10T11:04:58.966 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.5.log
2026-03-10T11:04:58.975 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.7.log
2026-03-10T11:04:58.976 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.6.log: /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mds.foofs.vm07.yrhofr.log: 84.0% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mds.foofs.vm07.yrhofr.log.gz
2026-03-10T11:04:58.985 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mds.foofs.vm06.yhlcqi.log
2026-03-10T11:04:58.995 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.7.log: /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mds.foofs.vm06.yhlcqi.log: 71.6% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mds.foofs.vm06.yhlcqi.log.gz
2026-03-10T11:04:59.029 INFO:teuthology.orchestra.run.vm07.stderr: 92.1% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mon.vm07.log.gz
2026-03-10T11:04:59.050 INFO:teuthology.orchestra.run.vm07.stderr: 93.4% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.0.log.gz
2026-03-10T11:04:59.054 INFO:teuthology.orchestra.run.vm07.stderr: 93.4% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.4.log.gz
2026-03-10T11:04:59.062 INFO:teuthology.orchestra.run.vm07.stderr: 93.4% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.2.log.gz
2026-03-10T11:04:59.085 INFO:teuthology.orchestra.run.vm07.stderr: 93.4% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.6.log.gz
2026-03-10T11:04:59.087 INFO:teuthology.orchestra.run.vm07.stderr:
2026-03-10T11:04:59.087 INFO:teuthology.orchestra.run.vm07.stderr:real 0m0.179s
2026-03-10T11:04:59.087 INFO:teuthology.orchestra.run.vm07.stderr:user 0m0.278s
2026-03-10T11:04:59.087 INFO:teuthology.orchestra.run.vm07.stderr:sys 0m0.038s
2026-03-10T11:04:59.120 INFO:teuthology.orchestra.run.vm06.stderr: 93.6% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.5.log.gz
2026-03-10T11:04:59.121 INFO:teuthology.orchestra.run.vm06.stderr: 93.6% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.1.log.gz
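
The jumbled stderr above is expected rather than a logging bug: `--max-procs=0` tells xargs to run one gzip per file with no concurrency limit, and both the xargs command echo (`--verbose`) and gzip's ratio report share the same stderr stream, so fragments from different gzip processes interleave mid-line and percentages can appear detached from their file names. Stripped to its essentials, the compression idiom being run on each host is:

    # compress every *.log under /var/log/ceph in parallel, one gzip process per file
    sudo find /var/log/ceph -name '*.log' -print0 \
      | sudo xargs -0 --max-args=1 --max-procs=0 --no-run-if-empty --verbose -- gzip -5 --verbose --
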
2026-03-10T11:04:59.121 INFO:teuthology.orchestra.run.vm06.stderr: 89.1% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mgr.vm06.luxohm.log.gz
2026-03-10T11:04:59.135 INFO:teuthology.orchestra.run.vm06.stderr: 93.6% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.7.log.gz
2026-03-10T11:04:59.139 INFO:teuthology.orchestra.run.vm06.stderr: 93.8% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-osd.3.log.gz
2026-03-10T11:04:59.170 INFO:teuthology.orchestra.run.vm06.stderr: 91.4% -- replaced with /var/log/ceph/2d4d1532-1c70-11f1-9ee5-8d2ac270c240/ceph-mon.vm06.log.gz
2026-03-10T11:04:59.172 INFO:teuthology.orchestra.run.vm06.stderr:
2026-03-10T11:04:59.172 INFO:teuthology.orchestra.run.vm06.stderr:real 0m0.266s
2026-03-10T11:04:59.172 INFO:teuthology.orchestra.run.vm06.stderr:user 0m0.448s
2026-03-10T11:04:59.172 INFO:teuthology.orchestra.run.vm06.stderr:sys 0m0.044s
2026-03-10T11:04:59.172 INFO:tasks.cephadm:Archiving logs...
2026-03-10T11:04:59.172 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1008/remote/vm06/log
2026-03-10T11:04:59.173 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T11:04:59.266 DEBUG:teuthology.misc:Transferring archived files from vm07:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1008/remote/vm07/log
2026-03-10T11:04:59.266 DEBUG:teuthology.orchestra.run.vm07:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T11:04:59.314 INFO:tasks.cephadm:Removing cluster...
2026-03-10T11:04:59.314 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 --force
2026-03-10T11:04:59.444 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:04:59.535 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 2d4d1532-1c70-11f1-9ee5-8d2ac270c240 --force
2026-03-10T11:04:59.666 INFO:teuthology.orchestra.run.vm07.stdout:Deleting cluster with fsid: 2d4d1532-1c70-11f1-9ee5-8d2ac270c240
2026-03-10T11:04:59.769 INFO:tasks.cephadm:Removing cephadm ...
2026-03-10T11:04:59.769 DEBUG:teuthology.orchestra.run.vm06:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T11:04:59.788 DEBUG:teuthology.orchestra.run.vm07:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T11:04:59.805 INFO:tasks.cephadm:Teardown complete
2026-03-10T11:04:59.805 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-10T11:04:59.808 INFO:teuthology.task.clock:Checking final clock skew...
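
The log archiving above streams a tarball of the remote directory straight down the SSH channel rather than copying file by file: `tar c -f -` writes the archive to stdout, and the harness unpacks it locally under the job's archive path. With a plain ssh client the same pattern would look roughly like the sketch below; the hostname and destination directory here are illustrative, not the exact teuthology mechanism:

    # stream remote logs into the local archive tree (sketch under assumed paths)
    mkdir -p ./archive/remote/vm06/log
    ssh vm06 'sudo tar c -f - -C /var/log/ceph -- .' | tar xf - -C ./archive/remote/vm06/log
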
2026-03-10T11:04:59.808 DEBUG:teuthology.orchestra.run.vm06:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T11:04:59.830 DEBUG:teuthology.orchestra.run.vm07:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T11:04:59.845 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found
2026-03-10T11:04:59.859 INFO:teuthology.orchestra.run.vm07.stderr:bash: line 1: ntpq: command not found
2026-03-10T11:04:59.927 INFO:teuthology.orchestra.run.vm07.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T11:04:59.927 INFO:teuthology.orchestra.run.vm07.stdout:===============================================================================
2026-03-10T11:04:59.927 INFO:teuthology.orchestra.run.vm07.stdout:^+ netcup01.theravenhub.com 2 6 377 18 +964us[ +964us] +/- 18ms
2026-03-10T11:04:59.927 INFO:teuthology.orchestra.run.vm07.stdout:^+ formularfetischisten.de 2 6 377 20 -1844us[-1833us] +/- 35ms
2026-03-10T11:04:59.927 INFO:teuthology.orchestra.run.vm07.stdout:^* v2202508239286376495.ult> 2 6 377 19 +935us[ +946us] +/- 18ms
2026-03-10T11:04:59.927 INFO:teuthology.orchestra.run.vm07.stdout:^+ 172-104-138-148.ip.linod> 3 6 377 18 -930us[ -930us] +/- 17ms
2026-03-10T11:04:59.927 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T11:04:59.927 INFO:teuthology.orchestra.run.vm06.stdout:===============================================================================
2026-03-10T11:04:59.928 INFO:teuthology.orchestra.run.vm06.stdout:^+ netcup01.theravenhub.com 2 6 377 20 +1215us[ +909us] +/- 19ms
2026-03-10T11:04:59.928 INFO:teuthology.orchestra.run.vm06.stdout:^+ formularfetischisten.de 2 6 377 18 -1825us[-1825us] +/- 35ms
2026-03-10T11:04:59.928 INFO:teuthology.orchestra.run.vm06.stdout:^+ v2202508239286376495.ult> 2 6 377 18 +924us[ +924us] +/- 18ms
2026-03-10T11:04:59.928 INFO:teuthology.orchestra.run.vm06.stdout:^* 172-104-138-148.ip.linod> 3 6 377 19 -899us[-1206us] +/- 17ms
2026-03-10T11:04:59.928 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-10T11:04:59.931 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-10T11:04:59.931 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-10T11:04:59.933 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-10T11:04:59.936 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-10T11:04:59.938 INFO:teuthology.task.internal:Duration was 474.908933 seconds
2026-03-10T11:04:59.938 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-10T11:04:59.941 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-10T11:04:59.941 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T11:04:59.969 DEBUG:teuthology.orchestra.run.vm07:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T11:05:00.005 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T11:05:00.010 INFO:teuthology.orchestra.run.vm07.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T11:05:00.441 INFO:teuthology.task.internal.syslog:Checking logs for errors...
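
The clock-skew check above is daemon-agnostic: `ntpq -p` fails on these CentOS 9 hosts because the ntp tools are not installed, the `||` chain falls through to `chronyc sources` (whose peer table is what actually printed), and the trailing `|| true` keeps a host with neither daemon from failing an otherwise green job:

    # report time-sync peers with whichever daemon exists; never fail the task
    PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
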
2026-03-10T11:05:00.441 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm06.local
2026-03-10T11:05:00.441 DEBUG:teuthology.orchestra.run.vm06:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T11:05:00.470 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm07.local
2026-03-10T11:05:00.471 DEBUG:teuthology.orchestra.run.vm07:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T11:05:00.509 INFO:teuthology.task.internal.syslog:Gathering journalctl...
2026-03-10T11:05:00.509 DEBUG:teuthology.orchestra.run.vm06:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T11:05:00.512 DEBUG:teuthology.orchestra.run.vm07:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T11:05:00.978 INFO:teuthology.task.internal.syslog:Compressing syslogs...
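
The kern.log scan above is an exclude-list pipeline: match anything alarming-looking (BUG, INFO, DEADLOCK), strip every known-benign pattern, and keep only the first survivor with `head -n 1`; empty output is treated as a clean host. A trimmed sketch of the shape, with a placeholder path and only two of the many exclusions from the real command:

    # first unexplained kernel message, if any; empty output == pass
    first=$(grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' kern.log \
              | grep -v 'lockdep is turned off' \
              | grep -v CRON \
              | head -n 1)
    [ -z "$first" ] || echo "unexpected kernel message: $first"
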
2026-03-10T11:05:00.978 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T11:05:00.979 DEBUG:teuthology.orchestra.run.vm07:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T11:05:01.001 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T11:05:01.001 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T11:05:01.002 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T11:05:01.002 INFO:teuthology.orchestra.run.vm06.stderr: -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T11:05:01.002 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T11:05:01.003 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T11:05:01.004 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T11:05:01.004 INFO:teuthology.orchestra.run.vm07.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T11:05:01.004 INFO:teuthology.orchestra.run.vm07.stderr: -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T11:05:01.004 INFO:teuthology.orchestra.run.vm07.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: /home/ubuntu/cephtest/archive/syslog/journalctl.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T11:05:01.138 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.9% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T11:05:01.141 INFO:teuthology.orchestra.run.vm07.stderr: 98.3% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T11:05:01.143 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-10T11:05:01.147 INFO:teuthology.task.internal:Restoring /etc/sudoers...
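
The sudoers restore that follows is the second half of a backup/restore pair: the job parks the pristine file at /etc/sudoers.orig.teuthology during setup and puts it back with a single atomic rename at teardown. The setup half in this sketch is inferred from the backup file's name and is not shown anywhere in this log:

    sudo cp -a /etc/sudoers /etc/sudoers.orig.teuthology   # setup (assumed counterpart)
    # ... job runs with a modified /etc/sudoers ...
    sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers   # teardown, as in the log below
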
2026-03-10T11:05:01.147 DEBUG:teuthology.orchestra.run.vm06:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T11:05:01.207 DEBUG:teuthology.orchestra.run.vm07:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T11:05:01.233 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-10T11:05:01.235 DEBUG:teuthology.orchestra.run.vm06:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T11:05:01.249 DEBUG:teuthology.orchestra.run.vm07:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T11:05:01.273 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = core
2026-03-10T11:05:01.298 INFO:teuthology.orchestra.run.vm07.stdout:kernel.core_pattern = core
2026-03-10T11:05:01.314 DEBUG:teuthology.orchestra.run.vm06:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T11:05:01.346 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T11:05:01.346 DEBUG:teuthology.orchestra.run.vm07:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T11:05:01.370 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T11:05:01.370 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-10T11:05:01.374 INFO:teuthology.task.internal:Transferring archived files...
2026-03-10T11:05:01.374 DEBUG:teuthology.misc:Transferring archived files from vm06:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1008/remote/vm06
2026-03-10T11:05:01.374 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T11:05:01.413 DEBUG:teuthology.misc:Transferring archived files from vm07:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1008/remote/vm07
2026-03-10T11:05:01.414 DEBUG:teuthology.orchestra.run.vm07:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T11:05:01.441 INFO:teuthology.task.internal:Removing archive directory...
2026-03-10T11:05:01.442 DEBUG:teuthology.orchestra.run.vm06:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T11:05:01.456 DEBUG:teuthology.orchestra.run.vm07:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T11:05:01.497 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-10T11:05:01.502 INFO:teuthology.task.internal:Not uploading archives.
2026-03-10T11:05:01.502 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-10T11:05:01.504 INFO:teuthology.task.internal:Tidying up after the test...
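
The coredump unwind above resets kernel.core_pattern, prunes only cores that `file` attributes to systemd-sysusers (filtered out as noise), and removes the directory only if that left it empty; the follow-up `test -e` is expected to exit 1 on a clean run, since a surviving coredump directory would mean real cores were captured and kept for triage. Condensed, with the path in a variable for readability:

    dir=/home/ubuntu/cephtest/archive/coredump
    sudo sysctl -w kernel.core_pattern=core
    # drop cores written by systemd-sysusers; keep anything else for triage
    sudo bash -c "for f in \$(find $dir -type f); do file \$f | grep -q systemd-sysusers && rm \$f || true; done"
    rmdir --ignore-fail-on-non-empty -- "$dir"
    test -e "$dir" && echo 'real coredumps captured'   # exit 1 here is the clean outcome
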
2026-03-10T11:05:01.505 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T11:05:01.515 DEBUG:teuthology.orchestra.run.vm07:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T11:05:01.531 INFO:teuthology.orchestra.run.vm06.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 11:05 /home/ubuntu/cephtest
2026-03-10T11:05:01.557 INFO:teuthology.orchestra.run.vm07.stdout: 8532144 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 11:05 /home/ubuntu/cephtest
2026-03-10T11:05:01.558 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-10T11:05:01.564 INFO:teuthology.run:Summary data:
description: orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 1-start 2-services/nfs-ingress2 3-final}
duration: 474.9089334011078
owner: kyr
success: true

2026-03-10T11:05:01.564 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T11:05:01.583 INFO:teuthology.run:pass
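
The closing `find ... -ls ; rmdir` doubles as an assertion: `rmdir` only succeeds on an empty directory, so anything a task forgot to clean up would both show in the listing and make the command exit nonzero, surfacing as a teardown error. Here each host shows only the empty cephtest directory itself, and the job is reported as a pass:

    # list leftovers, then insist the test directory is empty
    find /home/ubuntu/cephtest -ls
    rmdir -- /home/ubuntu/cephtest   # nonzero exit here would flag leftover files
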