2026-03-08T22:59:00.768 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-08T22:59:00.774 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-08T22:59:00.802 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/291
branch: squid
description: orch:cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final}
email: null
first_in_suite: false
flavor: default
job_id: '291'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_REFRESH_FAILED
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
    - scontext=system_u:system_r:getty_t:s0
  workunit:
    branch: tt-squid
    sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
- - host.b
  - osd.4
  - osd.5
  - osd.6
  - osd.7
seed: 8017
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch:cephadm
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
targets:
  vm00.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEU/OJLPzJTC3Y/nMkzwkWZLPDKX8T53QQ/ZsR0DzETUt3v1HEjuzHJGGsFYOzKl0PWp8y1Nlrn4/De5TyB4wPg=
  vm08.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDFdMIsP8oyZyjVRrm8oxXZ8+mshQFK9IkNdrLI+dewC1a6buab7JH67Okdk9A9F5IheGFTOUZwWnE9MRL8U37s=
tasks:
- cephadm:
    cephadm_branch: v17.2.0
    cephadm_git_url: https://github.com/ceph/ceph
    image: quay.io/ceph/ceph:v17.2.0
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip.exec:
    all-hosts:
    - systemctl stop nfs-server
- cephadm.shell:
    host.a:
    - ceph fs volume create foofs
- cephadm.wait_for_service:
    service: mds.foofs
- cephadm.shell:
    host.a:
    - ceph nfs cluster create foo --placement=2 || ceph nfs cluster create cephfs foo --placement=2
    - ceph nfs export create cephfs --fsname foofs --clusterid foo --binding /fake || ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
    - while ! ceph orch ls | grep nfs | grep 2/2 ; do sleep 1 ; done
- vip.exec:
    host.a:
    - mkdir /mnt/foo
    - while ! mount -t nfs $(hostname):/fake /mnt/foo -o sync ; do sleep 5 ; done
    - echo test > /mnt/foo/testfile
    - sync
- parallel:
  - upgrade-tasks
  - workload-tasks
- vip.exec:
    host.a:
    - umount /mnt/foo
- cephadm.shell:
    host.a:
    - ceph nfs cluster ls | grep foo
    - ceph nfs export ls foo --detailed
    - rados -p .nfs --all ls -
- cephadm.shell:
    host.a:
    - 'set -ex [[ `ceph config get mgr mgr/cephadm/migration_current` -gt 2 ]] '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-08_22:22:45
tube: vps
upgrade-tasks:
  sequential:
  - cephadm.shell:
      env:
      - sha1
      host.a:
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
      - ceph config set global log_to_journald false --force
      - ceph mgr module enable nfs --force
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
  - cephadm.shell:
      env:
      - sha1
      host.a:
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
      - ceph orch ps
      - ceph versions
      - echo "wait for servicemap items w/ changing names to refresh"
      - sleep 60
      - ceph orch ps
      - ceph orch upgrade status
      - ceph health detail
      - ceph versions
      - ceph versions | jq -e '.overall | length == 1'
      - ceph versions | jq -e '.overall | keys' | grep $sha1
  - cephadm.wait_for_service:
      service: nfs.foo
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
workload-tasks:
  sequential:
  - exec:
      host.a:
      - cd /mnt/foo && dbench 5 -t 600 || true
      - umount /mnt/foo
      - while ! mount -t nfs $(hostname):/fake /mnt/foo ; do sleep 5 ; done
      - cd /mnt/foo && dbench 5 -t 5
2026-03-08T22:59:00.802 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it
2026-03-08T22:59:00.803 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks
2026-03-08T22:59:00.803 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-08T22:59:00.803 INFO:teuthology.task.internal:Checking packages...
2026-03-08T22:59:00.803 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-08T22:59:00.803 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-08T22:59:00.803 INFO:teuthology.packaging:ref: None
2026-03-08T22:59:00.803 INFO:teuthology.packaging:tag: None
2026-03-08T22:59:00.803 INFO:teuthology.packaging:branch: squid
2026-03-08T22:59:00.803 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-08T22:59:00.803 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-08T22:59:01.589 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-08T22:59:01.590 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-08T22:59:01.634 INFO:teuthology.task.internal:no buildpackages task found
2026-03-08T22:59:01.634 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-08T22:59:01.635 INFO:teuthology.task.internal:Saving configuration 2026-03-08T22:59:01.640 INFO:teuthology.run_tasks:Running task internal.check_lock... 2026-03-08T22:59:01.720 INFO:teuthology.task.internal.check_lock:Checking locks... 2026-03-08T22:59:01.727 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm00.local', 'description': '/archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/291', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-08 22:57:46.949396', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:00', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEU/OJLPzJTC3Y/nMkzwkWZLPDKX8T53QQ/ZsR0DzETUt3v1HEjuzHJGGsFYOzKl0PWp8y1Nlrn4/De5TyB4wPg='} 2026-03-08T22:59:01.732 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm08.local', 'description': '/archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/291', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-08 22:57:46.949827', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:08', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDFdMIsP8oyZyjVRrm8oxXZ8+mshQFK9IkNdrLI+dewC1a6buab7JH67Okdk9A9F5IheGFTOUZwWnE9MRL8U37s='} 2026-03-08T22:59:01.732 INFO:teuthology.run_tasks:Running task internal.add_remotes... 2026-03-08T22:59:01.735 INFO:teuthology.task.internal:roles: ubuntu@vm00.local - ['host.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0'] 2026-03-08T22:59:01.735 INFO:teuthology.task.internal:roles: ubuntu@vm08.local - ['host.b', 'osd.4', 'osd.5', 'osd.6', 'osd.7'] 2026-03-08T22:59:01.735 INFO:teuthology.run_tasks:Running task console_log... 2026-03-08T22:59:01.753 DEBUG:teuthology.task.console_log:vm00 does not support IPMI; excluding 2026-03-08T22:59:01.757 DEBUG:teuthology.task.console_log:vm08 does not support IPMI; excluding 2026-03-08T22:59:01.758 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fa5efba8ee0>, signals=[15]) 2026-03-08T22:59:01.758 INFO:teuthology.run_tasks:Running task internal.connect... 2026-03-08T22:59:01.760 INFO:teuthology.task.internal:Opening connections... 2026-03-08T22:59:01.760 DEBUG:teuthology.task.internal:connecting to ubuntu@vm00.local 2026-03-08T22:59:01.760 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-08T22:59:01.819 DEBUG:teuthology.task.internal:connecting to ubuntu@vm08.local 2026-03-08T22:59:01.819 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-08T22:59:01.878 INFO:teuthology.run_tasks:Running task internal.push_inventory... 
2026-03-08T22:59:01.894 DEBUG:teuthology.orchestra.run.vm00:> uname -m 2026-03-08T22:59:01.938 INFO:teuthology.orchestra.run.vm00.stdout:x86_64 2026-03-08T22:59:01.938 DEBUG:teuthology.orchestra.run.vm00:> cat /etc/os-release 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:NAME="CentOS Stream" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:VERSION="9" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:ID="centos" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:ID_LIKE="rhel fedora" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:VERSION_ID="9" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:PLATFORM_ID="platform:el9" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:ANSI_COLOR="0;31" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:LOGO="fedora-logo-icon" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:HOME_URL="https://centos.org/" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-08T22:59:01.994 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-08T22:59:01.995 INFO:teuthology.lock.ops:Updating vm00.local on lock server 2026-03-08T22:59:02.016 DEBUG:teuthology.orchestra.run.vm08:> uname -m 2026-03-08T22:59:02.031 INFO:teuthology.orchestra.run.vm08.stdout:x86_64 2026-03-08T22:59:02.031 DEBUG:teuthology.orchestra.run.vm08:> cat /etc/os-release 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:NAME="CentOS Stream" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:VERSION="9" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:ID="centos" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:ID_LIKE="rhel fedora" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:VERSION_ID="9" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:PLATFORM_ID="platform:el9" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:ANSI_COLOR="0;31" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:LOGO="fedora-logo-icon" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:HOME_URL="https://centos.org/" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-08T22:59:02.085 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-08T22:59:02.085 INFO:teuthology.lock.ops:Updating vm08.local on lock server 2026-03-08T22:59:02.090 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-03-08T22:59:02.092 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-03-08T22:59:02.093 INFO:teuthology.task.internal:Checking for old test directory... 
2026-03-08T22:59:02.093 DEBUG:teuthology.orchestra.run.vm00:> test '!' -e /home/ubuntu/cephtest 2026-03-08T22:59:02.095 DEBUG:teuthology.orchestra.run.vm08:> test '!' -e /home/ubuntu/cephtest 2026-03-08T22:59:02.140 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-08T22:59:02.141 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-03-08T22:59:02.141 DEBUG:teuthology.orchestra.run.vm00:> test -z $(ls -A /var/lib/ceph) 2026-03-08T22:59:02.150 DEBUG:teuthology.orchestra.run.vm08:> test -z $(ls -A /var/lib/ceph) 2026-03-08T22:59:02.164 INFO:teuthology.orchestra.run.vm00.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-08T22:59:02.196 INFO:teuthology.orchestra.run.vm08.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-08T22:59:02.197 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-08T22:59:02.205 DEBUG:teuthology.orchestra.run.vm00:> test -e /ceph-qa-ready 2026-03-08T22:59:02.219 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T22:59:02.411 DEBUG:teuthology.orchestra.run.vm08:> test -e /ceph-qa-ready 2026-03-08T22:59:02.425 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T22:59:02.610 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-08T22:59:02.612 INFO:teuthology.task.internal:Creating test directory... 2026-03-08T22:59:02.612 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-08T22:59:02.614 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-08T22:59:02.629 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-08T22:59:02.631 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-08T22:59:02.632 INFO:teuthology.task.internal:Creating archive directory... 2026-03-08T22:59:02.632 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-08T22:59:02.671 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-08T22:59:02.688 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-03-08T22:59:02.689 INFO:teuthology.task.internal:Enabling coredump saving... 
2026-03-08T22:59:02.690 DEBUG:teuthology.orchestra.run.vm00:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-08T22:59:02.740 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T22:59:02.741 DEBUG:teuthology.orchestra.run.vm08:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-08T22:59:02.755 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T22:59:02.755 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-08T22:59:02.783 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-08T22:59:02.806 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-08T22:59:02.817 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-08T22:59:02.821 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-08T22:59:02.833 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-08T22:59:02.834 INFO:teuthology.run_tasks:Running task internal.sudo... 2026-03-08T22:59:02.836 INFO:teuthology.task.internal:Configuring sudo... 2026-03-08T22:59:02.836 DEBUG:teuthology.orchestra.run.vm00:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-08T22:59:02.861 DEBUG:teuthology.orchestra.run.vm08:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-08T22:59:02.897 INFO:teuthology.run_tasks:Running task internal.syslog... 2026-03-08T22:59:02.900 INFO:teuthology.task.internal.syslog:Starting syslog monitoring... 
2026-03-08T22:59:02.900 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-08T22:59:02.925 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-08T22:59:02.953 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-08T22:59:03.001 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-08T22:59:03.058 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-08T22:59:03.058 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-08T22:59:03.117 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-08T22:59:03.139 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-08T22:59:03.196 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-08T22:59:03.196 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-08T22:59:03.256 DEBUG:teuthology.orchestra.run.vm00:> sudo service rsyslog restart 2026-03-08T22:59:03.258 DEBUG:teuthology.orchestra.run.vm08:> sudo service rsyslog restart 2026-03-08T22:59:03.286 INFO:teuthology.orchestra.run.vm00.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-08T22:59:03.330 INFO:teuthology.orchestra.run.vm08.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-08T22:59:03.775 INFO:teuthology.run_tasks:Running task internal.timer... 2026-03-08T22:59:03.828 INFO:teuthology.task.internal:Starting timer... 2026-03-08T22:59:03.828 INFO:teuthology.run_tasks:Running task pcp... 2026-03-08T22:59:03.833 INFO:teuthology.run_tasks:Running task selinux... 2026-03-08T22:59:03.844 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']} 2026-03-08T22:59:03.844 INFO:teuthology.task.selinux:Excluding vm00: VMs are not yet supported 2026-03-08T22:59:03.844 INFO:teuthology.task.selinux:Excluding vm08: VMs are not yet supported 2026-03-08T22:59:03.844 DEBUG:teuthology.task.selinux:Getting current SELinux state 2026-03-08T22:59:03.844 DEBUG:teuthology.task.selinux:Existing SELinux modes: {} 2026-03-08T22:59:03.844 INFO:teuthology.task.selinux:Putting SELinux into permissive mode 2026-03-08T22:59:03.844 INFO:teuthology.run_tasks:Running task ansible.cephlab... 
2026-03-08T22:59:03.869 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}} 2026-03-08T22:59:03.869 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git 2026-03-08T22:59:03.878 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin 2026-03-08T22:59:04.570 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main 2026-03-08T22:59:04.576 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-03-08T22:59:04.576 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventory7y8_j9uw --limit vm00.local,vm08.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-03-08T23:01:50.251 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm00.local'), Remote(name='ubuntu@vm08.local')] 2026-03-08T23:01:50.251 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm00.local' 2026-03-08T23:01:50.251 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-08T23:01:50.319 DEBUG:teuthology.orchestra.run.vm00:> true 2026-03-08T23:01:50.399 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm00.local' 2026-03-08T23:01:50.399 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm08.local' 2026-03-08T23:01:50.399 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-08T23:01:50.463 DEBUG:teuthology.orchestra.run.vm08:> true 2026-03-08T23:01:50.543 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm08.local' 2026-03-08T23:01:50.543 INFO:teuthology.run_tasks:Running task clock... 2026-03-08T23:01:50.546 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
2026-03-08T23:01:50.546 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-08T23:01:50.546 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-08T23:01:50.548 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-08T23:01:50.548 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-08T23:01:50.584 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-08T23:01:50.600 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-08T23:01:50.624 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-08T23:01:50.627 INFO:teuthology.orchestra.run.vm00.stderr:sudo: ntpd: command not found 2026-03-08T23:01:50.644 INFO:teuthology.orchestra.run.vm00.stdout:506 Cannot talk to daemon 2026-03-08T23:01:50.647 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-08T23:01:50.665 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-08T23:01:50.686 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-08T23:01:50.688 INFO:teuthology.orchestra.run.vm08.stderr:sudo: ntpd: command not found 2026-03-08T23:01:50.703 INFO:teuthology.orchestra.run.vm08.stdout:506 Cannot talk to daemon 2026-03-08T23:01:50.724 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-08T23:01:50.737 INFO:teuthology.orchestra.run.vm00.stderr:bash: line 1: ntpq: command not found 2026-03-08T23:01:50.739 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-08T23:01:50.739 INFO:teuthology.orchestra.run.vm00.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-08T23:01:50.739 INFO:teuthology.orchestra.run.vm00.stdout:=============================================================================== 2026-03-08T23:01:50.795 INFO:teuthology.orchestra.run.vm08.stderr:bash: line 1: ntpq: command not found 2026-03-08T23:01:50.796 INFO:teuthology.orchestra.run.vm08.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-08T23:01:50.797 INFO:teuthology.orchestra.run.vm08.stdout:=============================================================================== 2026-03-08T23:01:50.797 INFO:teuthology.run_tasks:Running task cephadm... 
2026-03-08T23:01:50.844 INFO:tasks.cephadm:Config: {'cephadm_branch': 'v17.2.0', 'cephadm_git_url': 'https://github.com/ceph/ceph', 'image': 'quay.io/ceph/ceph:v17.2.0', 'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_REFRESH_FAILED'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'} 2026-03-08T23:01:50.844 INFO:tasks.cephadm:Cluster image is quay.io/ceph/ceph:v17.2.0 2026-03-08T23:01:50.844 INFO:tasks.cephadm:Cluster fsid is cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:01:50.844 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-08T23:01:50.844 INFO:tasks.cephadm:No mon roles; fabricating mons 2026-03-08T23:01:50.844 INFO:tasks.cephadm:Monitor IPs: {'mon.vm00': '192.168.123.100', 'mon.vm08': '192.168.123.108'} 2026-03-08T23:01:50.844 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-08T23:01:50.844 DEBUG:teuthology.orchestra.run.vm00:> sudo hostname $(hostname -s) 2026-03-08T23:01:50.871 DEBUG:teuthology.orchestra.run.vm08:> sudo hostname $(hostname -s) 2026-03-08T23:01:50.907 INFO:tasks.cephadm:Downloading cephadm (repo https://github.com/ceph/ceph ref v17.2.0)... 2026-03-08T23:01:50.907 DEBUG:teuthology.orchestra.run.vm00:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-08T23:01:51.165 INFO:teuthology.orchestra.run.vm00.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 8 23:01 /home/ubuntu/cephtest/cephadm 2026-03-08T23:01:51.165 DEBUG:teuthology.orchestra.run.vm08:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-08T23:01:51.257 INFO:teuthology.orchestra.run.vm08.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 8 23:01 /home/ubuntu/cephtest/cephadm 2026-03-08T23:01:51.257 DEBUG:teuthology.orchestra.run.vm00:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-08T23:01:51.282 DEBUG:teuthology.orchestra.run.vm08:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-08T23:01:51.307 INFO:tasks.cephadm:Pulling image quay.io/ceph/ceph:v17.2.0 on all hosts... 2026-03-08T23:01:51.307 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull 2026-03-08T23:01:51.324 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull 2026-03-08T23:01:51.517 INFO:teuthology.orchestra.run.vm00.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 2026-03-08T23:01:51.581 INFO:teuthology.orchestra.run.vm08.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 
2026-03-08T23:03:42.882 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-08T23:03:42.882 INFO:teuthology.orchestra.run.vm00.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)", 2026-03-08T23:03:42.882 INFO:teuthology.orchestra.run.vm00.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", 2026-03-08T23:03:42.882 INFO:teuthology.orchestra.run.vm00.stdout: "repo_digests": [ 2026-03-08T23:03:42.882 INFO:teuthology.orchestra.run.vm00.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", 2026-03-08T23:03:42.883 INFO:teuthology.orchestra.run.vm00.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667" 2026-03-08T23:03:42.883 INFO:teuthology.orchestra.run.vm00.stdout: ] 2026-03-08T23:03:42.883 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-08T23:03:42.922 INFO:teuthology.orchestra.run.vm08.stdout:{ 2026-03-08T23:03:42.922 INFO:teuthology.orchestra.run.vm08.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)", 2026-03-08T23:03:42.922 INFO:teuthology.orchestra.run.vm08.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", 2026-03-08T23:03:42.922 INFO:teuthology.orchestra.run.vm08.stdout: "repo_digests": [ 2026-03-08T23:03:42.922 INFO:teuthology.orchestra.run.vm08.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", 2026-03-08T23:03:42.922 INFO:teuthology.orchestra.run.vm08.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667" 2026-03-08T23:03:42.922 INFO:teuthology.orchestra.run.vm08.stdout: ] 2026-03-08T23:03:42.922 INFO:teuthology.orchestra.run.vm08.stdout:} 2026-03-08T23:03:42.938 DEBUG:teuthology.orchestra.run.vm00:> sudo mkdir -p /etc/ceph 2026-03-08T23:03:42.967 DEBUG:teuthology.orchestra.run.vm08:> sudo mkdir -p /etc/ceph 2026-03-08T23:03:43.004 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 777 /etc/ceph 2026-03-08T23:03:43.040 DEBUG:teuthology.orchestra.run.vm08:> sudo chmod 777 /etc/ceph 2026-03-08T23:03:43.081 INFO:tasks.cephadm:Writing seed config... 
2026-03-08T23:03:43.081 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-08T23:03:43.081 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-08T23:03:43.081 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-08T23:03:43.081 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-08T23:03:43.081 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-08T23:03:43.081 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-08T23:03:43.081 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-08T23:03:43.081 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-08T23:03:43.081 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-03-08T23:03:43.081 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-08T23:03:43.081 DEBUG:teuthology.orchestra.run.vm00:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-08T23:03:43.103 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = cabe2722-1b42-11f1-9450-0d39870fd3ae

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
2026-03-08T23:03:43.103 DEBUG:teuthology.orchestra.run.vm00:mon.vm00> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@mon.vm00.service 2026-03-08T23:03:43.144 INFO:tasks.cephadm:Bootstrapping... 2026-03-08T23:03:43.144 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.100 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring 2026-03-08T23:03:43.301 INFO:teuthology.orchestra.run.vm00.stderr:-------------------------------------------------------------------------------- 2026-03-08T23:03:43.301 INFO:teuthology.orchestra.run.vm00.stderr:cephadm ['--image', 'quay.io/ceph/ceph:v17.2.0', '-v', 'bootstrap', '--fsid', 'cabe2722-1b42-11f1-9450-0d39870fd3ae', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.100', '--skip-admin-label'] 2026-03-08T23:03:43.319 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0 2026-03-08T23:03:43.322 INFO:teuthology.orchestra.run.vm00.stderr:Verifying podman|docker is present... 2026-03-08T23:03:43.340 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0 2026-03-08T23:03:43.343 INFO:teuthology.orchestra.run.vm00.stderr:Verifying lvm2 is present... 2026-03-08T23:03:43.343 INFO:teuthology.orchestra.run.vm00.stderr:Verifying time synchronization is in place... 2026-03-08T23:03:43.351 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory 2026-03-08T23:03:43.358 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: inactive 2026-03-08T23:03:43.364 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: enabled 2026-03-08T23:03:43.371 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: active 2026-03-08T23:03:43.371 INFO:teuthology.orchestra.run.vm00.stderr:Unit chronyd.service is enabled and running 2026-03-08T23:03:43.371 INFO:teuthology.orchestra.run.vm00.stderr:Repeating the final host check... 
2026-03-08T23:03:43.390 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0 2026-03-08T23:03:43.393 INFO:teuthology.orchestra.run.vm00.stderr:podman (/bin/podman) version 5.8.0 is present 2026-03-08T23:03:43.393 INFO:teuthology.orchestra.run.vm00.stderr:systemctl is present 2026-03-08T23:03:43.393 INFO:teuthology.orchestra.run.vm00.stderr:lvcreate is present 2026-03-08T23:03:43.400 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory 2026-03-08T23:03:43.408 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: inactive 2026-03-08T23:03:43.417 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: enabled 2026-03-08T23:03:43.425 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: active 2026-03-08T23:03:43.426 INFO:teuthology.orchestra.run.vm00.stderr:Unit chronyd.service is enabled and running 2026-03-08T23:03:43.426 INFO:teuthology.orchestra.run.vm00.stderr:Host looks OK 2026-03-08T23:03:43.426 INFO:teuthology.orchestra.run.vm00.stderr:Cluster fsid: cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:03:43.426 INFO:teuthology.orchestra.run.vm00.stderr:Acquiring lock 140343289079936 on /run/cephadm/cabe2722-1b42-11f1-9450-0d39870fd3ae.lock 2026-03-08T23:03:43.426 INFO:teuthology.orchestra.run.vm00.stderr:Lock 140343289079936 acquired on /run/cephadm/cabe2722-1b42-11f1-9450-0d39870fd3ae.lock 2026-03-08T23:03:43.426 INFO:teuthology.orchestra.run.vm00.stderr:Verifying IP 192.168.123.100 port 3300 ... 2026-03-08T23:03:43.427 INFO:teuthology.orchestra.run.vm00.stderr:Verifying IP 192.168.123.100 port 6789 ... 2026-03-08T23:03:43.427 INFO:teuthology.orchestra.run.vm00.stderr:Base mon IP is 192.168.123.100, final addrv is [v2:192.168.123.100:3300,v1:192.168.123.100:6789] 2026-03-08T23:03:43.431 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.100 metric 100 2026-03-08T23:03:43.431 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.100 metric 100 2026-03-08T23:03:43.435 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: ::1 dev lo proto kernel metric 256 pref medium 2026-03-08T23:03:43.435 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-08T23:03:43.438 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-08T23:03:43.438 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: inet6 ::1/128 scope host 2026-03-08T23:03:43.438 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: valid_lft forever preferred_lft forever 2026-03-08T23:03:43.438 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 2: eth0: mtu 1500 state UP qlen 1000 2026-03-08T23:03:43.438 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: inet6 fe80::5055:ff:fe00:0/64 scope link noprefixroute 2026-03-08T23:03:43.438 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: valid_lft forever preferred_lft forever 2026-03-08T23:03:43.439 INFO:teuthology.orchestra.run.vm00.stderr:Mon IP `192.168.123.100` is in CIDR network `192.168.123.0/24` 2026-03-08T23:03:43.439 INFO:teuthology.orchestra.run.vm00.stderr:- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-08T23:03:43.440 INFO:teuthology.orchestra.run.vm00.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 
2026-03-08T23:03:43.464 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Trying to pull quay.io/ceph/ceph:v17.2.0... 2026-03-08T23:03:44.728 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Getting image source signatures 2026-03-08T23:03:44.728 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:33ca8fff7868c4dc0c11e09bca97c720eb9cfbab7221216754367dd8de70388a 2026-03-08T23:03:44.728 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:89b4a75bc2d8500f15463747507c9623df43886c134463e7f0527e70900e7a7b 2026-03-08T23:03:44.728 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:a70843738bb77e1ab9c1f85969ebdfa55f178e746be081d1cb4f94011f69eb7c 2026-03-08T23:03:44.728 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:c32ab78b488d0c72f64eded765c0cf6b5bf2c75dab66cb62a9d367fa6ec42513 2026-03-08T23:03:44.729 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:599d07cb321ff0a3c82224e1138fc685793fa69b93ed5780415751a5f7e4b8c2 2026-03-08T23:03:44.729 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying config sha256:e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 2026-03-08T23:03:44.734 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Writing manifest to image destination 2026-03-08T23:03:44.753 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 2026-03-08T23:03:44.890 INFO:teuthology.orchestra.run.vm00.stderr:ceph: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable) 2026-03-08T23:03:44.919 INFO:teuthology.orchestra.run.vm00.stderr:Ceph version: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable) 2026-03-08T23:03:44.919 INFO:teuthology.orchestra.run.vm00.stderr:Extracting ceph user uid/gid from container image... 2026-03-08T23:03:44.988 INFO:teuthology.orchestra.run.vm00.stderr:stat: 167 167 2026-03-08T23:03:45.013 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial keys... 2026-03-08T23:03:45.107 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQBRAK5p8+NnBhAApmwUOrb5riGpqGLfTIeyPg== 2026-03-08T23:03:45.240 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQBRAK5pDvpTDhAAeBxg7d5EJ86a40g2+TsZYw== 2026-03-08T23:03:45.329 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQBRAK5pGY2nExAAVMK6dgnJ+/BcAjtzEMsxBA== 2026-03-08T23:03:45.348 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial monmap... 
2026-03-08T23:03:45.430 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-08T23:03:45.431 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: setting min_mon_release = octopus 2026-03-08T23:03:45.431 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: set fsid to cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:03:45.431 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-08T23:03:45.448 INFO:teuthology.orchestra.run.vm00.stderr:monmaptool for vm00 [v2:192.168.123.100:3300,v1:192.168.123.100:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-08T23:03:45.448 INFO:teuthology.orchestra.run.vm00.stderr:setting min_mon_release = octopus 2026-03-08T23:03:45.448 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: set fsid to cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:03:45.448 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-08T23:03:45.448 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-08T23:03:45.448 INFO:teuthology.orchestra.run.vm00.stderr:Creating mon... 2026-03-08T23:03:45.618 INFO:teuthology.orchestra.run.vm00.stderr:create mon.vm00 on 2026-03-08T23:03:45.781 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 2026-03-08T23:03:45.935 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae.target → /etc/systemd/system/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae.target. 2026-03-08T23:03:45.935 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph.target.wants/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae.target → /etc/systemd/system/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae.target. 2026-03-08T23:03:46.228 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to reset failed state of unit ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@mon.vm00.service: Unit ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@mon.vm00.service not loaded. 2026-03-08T23:03:46.239 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae.target.wants/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@mon.vm00.service → /etc/systemd/system/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@.service. 2026-03-08T23:03:46.609 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present 2026-03-08T23:03:46.610 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to enable service . firewalld.service is not available 2026-03-08T23:03:46.610 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mon to start... 2026-03-08T23:03:46.610 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mon... 
2026-03-08T23:03:46.660 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:46 vm00 ceph-mon[47398]: mon.vm00 is new leader, mons vm00 in quorum (ranks 0) 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: cluster: 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: id: cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: health: HEALTH_OK 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: services: 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon: 1 daemons, quorum vm00 (age 0.165483s) 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr: no daemons active 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd: 0 osds: 0 up, 0 in 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: data: 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: pools: 0 pools, 0 pgs 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: objects: 0 objects, 0 B 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: usage: 0 B used, 0 B / 0 B avail 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: pgs: 2026-03-08T23:03:46.816 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:46.869 INFO:teuthology.orchestra.run.vm00.stderr:mon is available 2026-03-08T23:03:46.869 INFO:teuthology.orchestra.run.vm00.stderr:Assimilating anything we can from ceph.conf... 
2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [global] 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: fsid = cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_host = [v2:192.168.123.100:3300,v1:192.168.123.100:6789] 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [mgr] 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/telemetry/nag = false 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [osd] 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-08T23:03:47.095 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-08T23:03:47.124 INFO:teuthology.orchestra.run.vm00.stderr:Generating new minimal ceph.conf... 2026-03-08T23:03:47.329 INFO:teuthology.orchestra.run.vm00.stderr:Restarting the monitor... 2026-03-08T23:03:47.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 systemd[1]: Stopping Ceph mon.vm00 for cabe2722-1b42-11f1-9450-0d39870fd3ae... 
2026-03-08T23:03:47.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 bash[47584]: Error: no container with name or ID "ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon.vm00" found: no such container 2026-03-08T23:03:47.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm00[47394]: 2026-03-08T23:03:47.384+0000 7f0472509700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm00 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-08T23:03:47.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm00[47394]: 2026-03-08T23:03:47.384+0000 7f0472509700 -1 mon.vm00@0(leader) e1 *** Got Signal Terminated *** 2026-03-08T23:03:47.720 INFO:teuthology.orchestra.run.vm00.stderr:Setting mon public_network to 192.168.123.0/24 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 podman[47591]: 2026-03-08 23:03:47.507300487 +0000 UTC m=+0.137675063 container died dc8ae5b1c2e122b7c60b1d06a869d3159d99c8f04df98beef591c536c8509336 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm00, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , io.buildah.version=1.19.8, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, ceph=True, GIT_REPO=https://github.com/ceph/ceph-container.git, vendor=Red Hat, Inc., vcs-type=git, com.redhat.component=centos-stream-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, version=8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, release=754, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, GIT_BRANCH=HEAD, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, distribution-scope=public, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image) 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 podman[47591]: 2026-03-08 23:03:47.521521616 +0000 UTC m=+0.151896192 container remove dc8ae5b1c2e122b7c60b1d06a869d3159d99c8f04df98beef591c536c8509336 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm00, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=centos-stream-container, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, release=754, io.openshift.expose-services=, name=centos-stream, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, RELEASE=HEAD, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_BRANCH=HEAD, io.openshift.tags=base centos centos-stream, distribution-scope=public, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vendor=Red Hat, Inc., version=8, build-date=2022-05-03T08:36:31.336870, io.buildah.version=1.19.8, CEPH_POINT_RELEASE=-17.2.0, architecture=x86_64, maintainer=Guillaume Abrioux ) 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 bash[47591]: ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm00 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 bash[47607]: Error: no container with name or ID "ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon.vm00" found: no such container 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 systemd[1]: ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@mon.vm00.service: Deactivated successfully. 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 systemd[1]: Stopped Ceph mon.vm00 for cabe2722-1b42-11f1-9450-0d39870fd3ae. 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 systemd[1]: Starting Ceph mon.vm00 for cabe2722-1b42-11f1-9450-0d39870fd3ae... 
2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 podman[47653]: 2026-03-08 23:03:47.656595877 +0000 UTC m=+0.021224858 container create 6ef5be51d7dede9105fbd2c70c0225090ef0bf980757f133b20f8dbefca0646d (image=quay.io/ceph/ceph:v17.2.0, name=ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm00, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_REPO=https://github.com/ceph/ceph-container.git, GIT_BRANCH=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.openshift.tags=base centos centos-stream, distribution-scope=public, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, version=8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.component=centos-stream-container, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, ceph=True, RELEASE=HEAD, CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, architecture=x86_64, maintainer=Guillaume Abrioux , build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, name=centos-stream, io.openshift.expose-services=, io.buildah.version=1.19.8, vcs-type=git, release=754, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vendor=Red Hat, Inc.) 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 podman[47653]: 2026-03-08 23:03:47.709619957 +0000 UTC m=+0.074248938 container init 6ef5be51d7dede9105fbd2c70c0225090ef0bf980757f133b20f8dbefca0646d (image=quay.io/ceph/ceph:v17.2.0, name=ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm00, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, RELEASE=HEAD, architecture=x86_64, vcs-type=git, release=754, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, distribution-scope=public, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.expose-services=, CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.buildah.version=1.19.8, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, ceph=True, name=centos-stream, com.redhat.component=centos-stream-container, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, maintainer=Guillaume Abrioux , GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vendor=Red Hat, Inc.) 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 podman[47653]: 2026-03-08 23:03:47.713187019 +0000 UTC m=+0.077816000 container start 6ef5be51d7dede9105fbd2c70c0225090ef0bf980757f133b20f8dbefca0646d (image=quay.io/ceph/ceph:v17.2.0, name=ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm00, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.expose-services=, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, ceph=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, build-date=2022-05-03T08:36:31.336870, CEPH_POINT_RELEASE=-17.2.0, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, release=754, GIT_CLEAN=True, maintainer=Guillaume Abrioux , summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream, distribution-scope=public, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8, name=centos-stream, RELEASE=HEAD, vcs-type=git, GIT_BRANCH=HEAD) 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 bash[47653]: 6ef5be51d7dede9105fbd2c70c0225090ef0bf980757f133b20f8dbefca0646d 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 podman[47653]: 2026-03-08 23:03:47.647060356 +0000 UTC m=+0.011689337 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph:v17.2.0 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 systemd[1]: Started Ceph mon.vm00 for cabe2722-1b42-11f1-9450-0d39870fd3ae. 
2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: set uid:gid to 167:167 (ceph:ceph) 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable), process ceph-mon, pid 2 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: pidfile_write: ignore empty --pid-file 2026-03-08T23:03:47.758 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: load: jerasure load: lrc 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: RocksDB version: 6.15.5 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Git sha rocksdb_build_git_sha:@0@ 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Compile date Apr 18 2022 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: DB SUMMARY 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: DB Session ID: 6RVQAFK5ILY4PC24VI5T 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: CURRENT file: CURRENT 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: IDENTITY file: IDENTITY 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: MANIFEST file: MANIFEST-000009 size: 131 Bytes 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm00/store.db dir, Total Num: 1, files: 000008.sst 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm00/store.db: 000010.log size: 73307 ; 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.error_if_exists: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.create_if_missing: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.paranoid_checks: 1 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.env: 0x55ff094c6860 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.fs: Posix File System 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.info_log: 0x55ff0b31bee0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_file_opening_threads: 16 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.statistics: (nil) 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: 
rocksdb: Options.use_fsync: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_log_file_size: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.keep_log_file_num: 1000 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.recycle_log_file_num: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.allow_fallocate: 1 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.allow_mmap_reads: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.allow_mmap_writes: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.use_direct_reads: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.create_missing_column_families: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.db_log_dir: 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.wal_dir: /var/lib/ceph/mon/ceph-vm00/store.db 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.advise_random_on_open: 1 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.db_write_buffer_size: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.write_buffer_manager: 0x55ff0b40c2a0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: 
rocksdb: Options.new_table_reader_for_compaction_inputs: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.rate_limiter: (nil) 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.wal_recovery_mode: 2 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.enable_thread_tracking: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.enable_pipelined_write: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.unordered_write: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.row_cache: None 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.wal_filter: None 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.allow_ingest_behind: 0 2026-03-08T23:03:47.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.preserve_deletes: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.two_write_queues: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.manual_wal_flush: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.atomic_flush: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: 
Options.log_readahead_size: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.best_efforts_recovery: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.allow_data_in_errors: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.db_host_id: __hostname__ 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_background_jobs: 2 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_background_compactions: -1 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_subcompactions: 1 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_total_wal_size: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_open_files: -1 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bytes_per_sync: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_readahead_size: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_background_flushes: -1 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:03:47 vm00 ceph-mon[47668]: rocksdb: Compression algorithms supported: 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: kZSTD supported: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: kXpressCompression supported: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: kLZ4HCCompression supported: 1 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: kLZ4Compression supported: 1 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: kBZip2Compression supported: 0 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: kZlibCompression supported: 1 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: kSnappyCompression supported: 1 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/version_set.cc:4725] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm00/store.db/MANIFEST-000009 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/column_family.cc:597] --------------- Options for column family [default]: 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.merge_operator: 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_filter: None 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_filter_factory: None 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.sst_partitioner_factory: None 2026-03-08T23:03:47.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55ff0b2e7d00) 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: cache_index_and_filter_blocks: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: pin_top_level_index_and_filter: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 
index_type: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: data_block_index_type: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: index_shortening: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: hash_index_allow_collision: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: checksum: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: no_block_cache: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache: 0x55ff0b352170 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache_name: BinnedLRUCache 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache_options: 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: capacity : 536870912 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: num_shard_bits : 4 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: strict_capacity_limit : 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: high_pri_pool_ratio: 0.000 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache_compressed: (nil) 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: persistent_cache: (nil) 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_size: 4096 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_size_deviation: 10 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_restart_interval: 16 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: index_block_restart_interval: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: metadata_block_size: 4096 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: partition_filters: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: use_delta_encoding: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: filter_policy: rocksdb.BuiltinBloomFilter 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: whole_key_filtering: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: verify_compression: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: read_amp_bytes_per_bit: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: format_version: 4 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: enable_index_compression: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_align: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.write_buffer_size: 33554432 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_write_buffer_number: 2 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compression: NoCompression 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bottommost_compression: Disabled 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.prefix_extractor: nullptr 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: 
Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.num_levels: 7 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compression_opts.level: 32767 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compression_opts.strategy: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-08T23:03:47.761 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compression_opts.enabled: false 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.target_file_size_base: 67108864 
2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.arena_block_size: 4194304 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.rate_limit_delay_max_milliseconds: 100 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.disable_auto_compactions: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 
2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.table_properties_collectors: 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.inplace_update_support: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.bloom_locality: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.max_successive_merges: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.paranoid_file_checks: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.force_consistency_checks: 1 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.report_bg_io_stats: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.ttl: 2592000 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.enable_blob_files: false 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.min_blob_size: 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: 
Options.blob_file_size: 268435456 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/version_set.cc:4773] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm00/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 11, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/version_set.cc:4782] Column family [default] (ID 0), log number is 5 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/version_set.cc:4083] Creating manifest 13 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773011027753134, "job": 1, "event": "recovery_started", "wal_files": [10]} 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/db_impl/db_impl_open.cc:847] Recovering log #10 mode 2 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [table/block_based/filter_policy.cc:996] Using legacy Bloom filter with high (20) bits/key. Dramatic filter space and/or accuracy improvement is available with format_version>=5. 
2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773011027755383, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 14, "file_size": 70411, "file_checksum": "", "file_checksum_func_name": "Unknown", "table_properties": {"data_size": 68729, "index_size": 175, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 581, "raw_key_size": 9544, "raw_average_key_size": 49, "raw_value_size": 63298, "raw_average_value_size": 326, "num_data_blocks": 8, "num_entries": 194, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1773011027, "oldest_key_time": 0, "file_creation_time": 0, "db_id": "997a4c30-04ec-4845-8699-f5bcc7b1733a", "db_session_id": "6RVQAFK5ILY4PC24VI5T"}} 2026-03-08T23:03:47.762 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/version_set.cc:4083] Creating manifest 15 2026-03-08T23:03:47.763 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773011027756911, "job": 1, "event": "recovery_finished"} 2026-03-08T23:03:47.763 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [file/delete_scheduler.cc:73] Deleted file /var/lib/ceph/mon/ceph-vm00/store.db/000010.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-08T23:03:47.953 INFO:teuthology.orchestra.run.vm00.stderr:Wrote config to /etc/ceph/ceph.conf 2026-03-08T23:03:47.953 INFO:teuthology.orchestra.run.vm00.stderr:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-08T23:03:47.953 INFO:teuthology.orchestra.run.vm00.stderr:Creating mgr... 2026-03-08T23:03:47.953 INFO:teuthology.orchestra.run.vm00.stderr:Verifying port 9283 ... 
2026-03-08T23:03:48.017 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/db_impl/db_impl_open.cc:1701] SstFileManager instance 0x55ff0b338a80 2026-03-08T23:03:48.017 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: DB pointer 0x55ff0b348000 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/db_impl/db_impl.cc:902] ------- DUMPING STATS ------- 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: starting mon.vm00 rank 0 at public addrs [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] at bind addrs [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon_data /var/lib/ceph/mon/ceph-vm00 fsid cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: rocksdb: [db/db_impl/db_impl.cc:903] 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** DB Stats ** 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] ** 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: L0 2/0 70.53 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 34.4 0.00 0.00 1 0.002 0 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Sum 2/0 70.53 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 34.4 0.00 0.00 1 0.002 0 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 34.4 0.00 0.00 1 0.002 0 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] ** 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:03:48.018 
INFO:journalctl@ceph.mon.vm00.vm00.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 34.4 0.00 0.00 1 0.002 0 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative compaction: 0.00 GB write, 3.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval compaction: 0.00 GB write, 3.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] ** 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: L0 2/0 70.53 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 34.4 0.00 0.00 1 0.002 0 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Sum 2/0 70.53 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 34.4 0.00 0.00 1 0.002 0 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] ** 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 34.4 0.00 0.00 1 0.002 0 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative compaction: 0.00 GB write, 3.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 2026-03-08T23:03:48.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mon.vm00@-1(???) 
e1 preinit fsid cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mon.vm00@-1(???).mds e1 new map 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mon.vm00@-1(???).mds e1 print_map 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout: e1 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout: legacy client fscid: -1 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout: No filesystems configured 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mon.vm00@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mon.vm00@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mon.vm00@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mon.vm00@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mon.vm00@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mon.vm00 is new leader, mons vm00 in quorum (ranks 0) 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: monmap e1: 1 mons at {vm00=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0]} 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: fsmap 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: osdmap e1: 0 total, 0 up, 0 in 2026-03-08T23:03:48.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:47 vm00 ceph-mon[47668]: mgrmap e1: no daemons active 2026-03-08T23:03:48.103 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to reset failed state of unit ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@mgr.vm00.pkgtpt.service: Unit ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@mgr.vm00.pkgtpt.service not loaded. 2026-03-08T23:03:48.111 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae.target.wants/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@mgr.vm00.pkgtpt.service → /etc/systemd/system/ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@.service. 2026-03-08T23:03:48.439 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present 2026-03-08T23:03:48.439 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to enable service . 
firewalld.service is not available 2026-03-08T23:03:48.439 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present 2026-03-08T23:03:48.439 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to open ports <[9283]>. firewalld.service is not available 2026-03-08T23:03:48.439 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr to start... 2026-03-08T23:03:48.439 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr... 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "cabe2722-1b42-11f1-9450-0d39870fd3ae", 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "vm00" 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 0, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-08T23:03:48.648 
INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-08T23:03:48.648 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-08T23:03:46.650782+0000", 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-08T23:03:48.649 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-08T23:03:48.825 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (1/15)... 2026-03-08T23:03:49.423 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:48 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/180678134' entity='client.admin' 2026-03-08T23:03:49.423 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:48 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/3296314997' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:03:51.023 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:51.025 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-08T23:03:51.025 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "cabe2722-1b42-11f1-9450-0d39870fd3ae", 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "vm00" 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 3, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-08T23:03:51.026 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 
2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-08T23:03:46.650782+0000", 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-08T23:03:51.027 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-08T23:03:51.067 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (2/15)... 2026-03-08T23:03:51.202 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:51 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/1585032892' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: Activating manager daemon vm00.pkgtpt 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: mgrmap e2: vm00.pkgtpt(active, starting, since 0.00486458s) 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr metadata", "who": "vm00.pkgtpt", "id": "vm00.pkgtpt"}]: dispatch 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: Manager daemon vm00.pkgtpt is now available 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' 2026-03-08T23:03:53.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:52 vm00 ceph-mon[47668]: from='mgr.14100 192.168.123.100:0/3260421102' entity='mgr.vm00.pkgtpt' 2026-03-08T23:03:53.251 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:53.252 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-08T23:03:53.252 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "cabe2722-1b42-11f1-9450-0d39870fd3ae", 2026-03-08T23:03:53.252 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-08T23:03:53.252 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-08T23:03:53.252 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-08T23:03:53.253 
INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "vm00" 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 5, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-08T23:03:53.253 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-08T23:03:53.254 
INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-08T23:03:46.650782+0000", 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-08T23:03:53.254 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-08T23:03:53.306 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (3/15)... 2026-03-08T23:03:54.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:53 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/1581053083' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:03:54.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:53 vm00 ceph-mon[47668]: mgrmap e3: vm00.pkgtpt(active, since 1.01493s) 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "cabe2722-1b42-11f1-9450-0d39870fd3ae", 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "vm00" 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 7, 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-08T23:03:55.625 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-08T23:03:55.626 
INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": true, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-08T23:03:46.650782+0000", 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-08T23:03:55.626 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-08T23:03:55.675 INFO:teuthology.orchestra.run.vm00.stderr:mgr is available 2026-03-08T23:03:55.919 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:55 vm00 ceph-mon[47668]: mgrmap e4: vm00.pkgtpt(active, since 2s) 2026-03-08T23:03:55.919 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:55 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/3973281719' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [global] 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: fsid = cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [mgr] 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/telemetry/nag = false 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [osd] 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-08T23:03:55.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-08T23:03:55.952 INFO:teuthology.orchestra.run.vm00.stderr:Enabling cephadm module... 2026-03-08T23:03:57.159 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:56 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/1344931878' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-08T23:03:57.159 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:56 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/1654119112' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-08T23:03:57.160 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-08T23:03:57.160 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 5, 2026-03-08T23:03:57.160 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": true, 2026-03-08T23:03:57.160 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "active_name": "vm00.pkgtpt", 2026-03-08T23:03:57.160 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-08T23:03:57.160 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-08T23:03:57.198 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for the mgr to restart... 2026-03-08T23:03:57.198 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr epoch 5... 2026-03-08T23:03:58.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:57 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/1654119112' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-08T23:03:58.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:57 vm00 ceph-mon[47668]: mgrmap e5: vm00.pkgtpt(active, since 4s) 2026-03-08T23:03:58.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:03:57 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/1880613070' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-08T23:04:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:01 vm00 ceph-mon[47668]: Active manager daemon vm00.pkgtpt restarted 2026-03-08T23:04:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:01 vm00 ceph-mon[47668]: Activating manager daemon vm00.pkgtpt 2026-03-08T23:04:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:01 vm00 ceph-mon[47668]: osdmap e2: 0 total, 0 up, 0 in 2026-03-08T23:04:02.218 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-08T23:04:02.219 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap_epoch": 7, 2026-03-08T23:04:02.219 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "initialized": true 2026-03-08T23:04:02.219 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-08T23:04:02.246 INFO:teuthology.orchestra.run.vm00.stderr:mgr epoch 5 is available 2026-03-08T23:04:02.246 INFO:teuthology.orchestra.run.vm00.stderr:Setting orchestrator backend to cephadm... 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: mgrmap e6: vm00.pkgtpt(active, starting, since 0.055164s) 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr metadata", "who": "vm00.pkgtpt", "id": "vm00.pkgtpt"}]: dispatch 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: Manager daemon vm00.pkgtpt is now available 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:04:02.293 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:04:02.294 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:04:02.294 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:04:02.294 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:02.294 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:02 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:04:02.744 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: value unchanged 2026-03-08T23:04:02.870 INFO:teuthology.orchestra.run.vm00.stderr:Generating ssh key... 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: [08/Mar/2026:23:04:02] ENGINE Bus STARTING 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: [08/Mar/2026:23:04:02] ENGINE Serving on https://192.168.123.100:7150 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: [08/Mar/2026:23:04:02] ENGINE Bus STARTED 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: mgrmap e7: vm00.pkgtpt(active, since 1.06259s) 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:04:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:03 vm00 ceph-mon[47668]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:03.686 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDW127BEIcdFEF3oW9ilN/Kpk/YNv6Xdw2WYdHrmEzly+J8h4SN1jMF1217HU9UXhtqn7+9KpB8HB/V/MxidbRii/nyJ7+xNvwQAmPykCP+r739szXphGUvv71HYz9BVwqGBMdm7fpkyYhyhIZtPpAbbvbu9+zQwdwal1ZWqIsgm1BXACTODbU4TV70ZKaX7HXPkMFBc4GQ2Oe0PMJqtgsytzdk0aZYTNsBqvtuFxtM4N7V7DCvHEiYNgyT4AH+CZIFO6hKdOCAEcUpXP8tycn6m7pfIcwChhSLpndSUivp3DrbQ5MQFRfXnsF308R3UuzuzugrYwQf81vvSjBEBy5OzZXYS2FB+E6naXnqFWrczVQZeDRQAhRJbWlhF/esbQJaL3OA8xWp2kj5ZqwtA5RLuam+KjoE04wLYJo/lHYFvAtpOHSdhZCFESibEGUxtqx+Ru6YPgD88yPdpXzwOoDzlZrrgZCNL8XEfnSXCck9Wnxyk3L9SI1MNuJq625dNI0= ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:04:03.749 INFO:teuthology.orchestra.run.vm00.stderr:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-08T23:04:03.749 INFO:teuthology.orchestra.run.vm00.stderr:Adding key to root@localhost authorized_keys... 2026-03-08T23:04:03.749 INFO:teuthology.orchestra.run.vm00.stderr:Adding host vm00... 2026-03-08T23:04:04.272 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:04 vm00 ceph-mon[47668]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:04.272 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:04 vm00 ceph-mon[47668]: Generating ssh key... 2026-03-08T23:04:04.272 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:04 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:04.272 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:04 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:04.272 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:04 vm00 ceph-mon[47668]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:04.272 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:04 vm00 ceph-mon[47668]: mgrmap e8: vm00.pkgtpt(active, since 2s) 2026-03-08T23:04:04.651 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Added host 'vm00' with addr '192.168.123.100' 2026-03-08T23:04:04.692 INFO:teuthology.orchestra.run.vm00.stderr:Deploying mon service with default placement... 2026-03-08T23:04:04.981 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled mon update... 2026-03-08T23:04:05.022 INFO:teuthology.orchestra.run.vm00.stderr:Deploying mgr service with default placement... 2026-03-08T23:04:05.281 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled mgr update... 2026-03-08T23:04:05.311 INFO:teuthology.orchestra.run.vm00.stderr:Deploying crash service with default placement... 2026-03-08T23:04:05.555 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled crash update... 2026-03-08T23:04:05.584 INFO:teuthology.orchestra.run.vm00.stderr:Deploying prometheus service with default placement... 
2026-03-08T23:04:05.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:05 vm00 ceph-mon[47668]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm00", "addr": "192.168.123.100", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:05.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:05 vm00 ceph-mon[47668]: Deploying cephadm binary to vm00 2026-03-08T23:04:05.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:05 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:05.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:05 vm00 ceph-mon[47668]: Added host vm00 2026-03-08T23:04:05.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:05 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:04:05.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:05 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:05.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:05 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:05.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:05 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:05.827 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled prometheus update... 2026-03-08T23:04:05.875 INFO:teuthology.orchestra.run.vm00.stderr:Deploying grafana service with default placement... 2026-03-08T23:04:06.133 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled grafana update... 2026-03-08T23:04:06.185 INFO:teuthology.orchestra.run.vm00.stderr:Deploying node-exporter service with default placement... 2026-03-08T23:04:06.494 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled node-exporter update... 2026-03-08T23:04:06.527 INFO:teuthology.orchestra.run.vm00.stderr:Deploying alertmanager service with default placement... 2026-03-08T23:04:06.906 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled alertmanager update... 
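At this point bootstrap has scheduled the default services (mon, mgr, crash, prometheus, grafana, node-exporter, alertmanager) but the daemons are only queued. The lines below are a minimal sketch, not part of the recorded run, of how the scheduled services and resulting daemons can be checked from the admin host; it assumes the cluster is reachable with the client.admin keyring, for example through the same cephadm shell wrapper seen elsewhere in this log.

    # list service specs and how many daemons are running vs. expected per service
    ceph orch ls
    # list the individual daemons and the hosts they were placed on
    ceph orch ps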
2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: Saving service mon spec with placement count:5 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: Saving service mgr spec with placement count:2 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: Saving service crash spec with placement * 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: Saving service prometheus spec with placement count:1 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:06 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:08.009 INFO:teuthology.orchestra.run.vm00.stderr:Enabling the dashboard module... 
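The dashboard step that follows enables an mgr module and then waits for the active mgr to restart, detecting the restart by watching the mgrmap epoch climb past the value it recorded beforehand. A minimal sketch of the same sequence, assuming an admin session against this cluster (not part of the recorded run):

    # enable the module; the active mgr restarts to load it
    ceph mgr module enable dashboard
    # the epoch reported here increases once the restarted mgr re-registers
    ceph mgr stat
    # confirm the module now shows up as enabled
    ceph mgr module ls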
2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: Saving service grafana spec with placement count:1 2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: Saving service node-exporter spec with placement * 2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: Saving service alertmanager spec with placement count:1 2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/1337400367' entity='client.admin' 2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:07 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/2947609246' entity='client.admin' 2026-03-08T23:04:09.574 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:09 vm00 ceph-mon[47668]: from='mgr.14120 192.168.123.100:0/1425775668' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:09.574 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:09 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/46326799' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-08T23:04:09.574 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-08T23:04:09.574 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 9, 2026-03-08T23:04:09.574 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": true, 2026-03-08T23:04:09.574 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "active_name": "vm00.pkgtpt", 2026-03-08T23:04:09.574 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-08T23:04:09.574 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-08T23:04:09.605 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for the mgr to restart... 2026-03-08T23:04:09.605 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr epoch 9... 2026-03-08T23:04:10.579 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:10 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/46326799' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-08T23:04:10.579 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:10 vm00 ceph-mon[47668]: mgrmap e9: vm00.pkgtpt(active, since 8s) 2026-03-08T23:04:10.579 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:10 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3960833790' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-08T23:04:14.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:13 vm00 ceph-mon[47668]: Active manager daemon vm00.pkgtpt restarted 2026-03-08T23:04:14.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:13 vm00 ceph-mon[47668]: Activating manager daemon vm00.pkgtpt 2026-03-08T23:04:14.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:13 vm00 ceph-mon[47668]: osdmap e3: 0 total, 0 up, 0 in 2026-03-08T23:04:14.861 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-08T23:04:14.861 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap_epoch": 11, 2026-03-08T23:04:14.861 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "initialized": true 2026-03-08T23:04:14.861 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-08T23:04:14.918 INFO:teuthology.orchestra.run.vm00.stderr:mgr epoch 9 is available 2026-03-08T23:04:14.918 INFO:teuthology.orchestra.run.vm00.stderr:Generating a dashboard self-signed certificate... 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: mgrmap e10: vm00.pkgtpt(active, starting, since 0.354021s) 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr metadata", "who": "vm00.pkgtpt", "id": "vm00.pkgtpt"}]: dispatch 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: Manager daemon vm00.pkgtpt is now available 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 
192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:04:15.118 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: [08/Mar/2026:23:04:14] ENGINE Bus STARTING 2026-03-08T23:04:15.119 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: [08/Mar/2026:23:04:14] ENGINE Serving on https://192.168.123.100:7150 2026-03-08T23:04:15.119 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: [08/Mar/2026:23:04:14] ENGINE Bus STARTED 2026-03-08T23:04:15.119 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:15.119 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:15.119 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:14 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:15.233 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Self-signed certificate created 2026-03-08T23:04:15.263 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial admin user... 2026-03-08T23:04:15.657 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {"username": "admin", "password": "$2b$12$ld30uFx0q8cAAcTGl.2.LuWI60p0SDj620szT972dVdRT0tQ/pr1.", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773011055, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-08T23:04:15.701 INFO:teuthology.orchestra.run.vm00.stderr:Fetching dashboard port number... 
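The bootstrap next resolves the dashboard port so it can print the access URL; the mon log a few entries further down shows it reading mgr/dashboard/ssl_server_port. A minimal sketch of retrieving the same information by hand, assuming an admin session against this cluster (not part of the recorded run):

    # the configured HTTPS port for the dashboard (8443 in this run)
    ceph config get mgr mgr/dashboard/ssl_server_port
    # the URLs the active mgr is actually serving, dashboard included
    ceph mgr services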
2026-03-08T23:04:15.920 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:15 vm00 ceph-mon[47668]: mgrmap e11: vm00.pkgtpt(active, since 1.35742s) 2026-03-08T23:04:15.920 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:15 vm00 ceph-mon[47668]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-08T23:04:15.920 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:15 vm00 ceph-mon[47668]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-08T23:04:15.920 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:15 vm00 ceph-mon[47668]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:15.920 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:15 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:15.920 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:15 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:15.920 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:15 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:15.920 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 8443 2026-03-08T23:04:15.976 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present 2026-03-08T23:04:15.976 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to open ports <[8443]>. firewalld.service is not available 2026-03-08T23:04:15.977 INFO:teuthology.orchestra.run.vm00.stderr:Ceph Dashboard is now available at: 2026-03-08T23:04:15.977 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-08T23:04:15.977 INFO:teuthology.orchestra.run.vm00.stderr: URL: https://vm00.local:8443/ 2026-03-08T23:04:15.977 INFO:teuthology.orchestra.run.vm00.stderr: User: admin 2026-03-08T23:04:15.977 INFO:teuthology.orchestra.run.vm00.stderr: Password: sxsjpkd14e 2026-03-08T23:04:15.977 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-08T23:04:15.977 INFO:teuthology.orchestra.run.vm00.stderr:Enabling autotune for osd_memory_target 2026-03-08T23:04:16.594 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: set mgr/dashboard/cluster/status 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr:You can access the Ceph CLI with: 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr: sudo /home/ubuntu/cephtest/cephadm shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr:Please consider enabling telemetry to help improve Ceph: 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr: ceph telemetry on 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr:For more information see: 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr: https://docs.ceph.com/docs/master/mgr/telemetry/ 2026-03-08T23:04:16.632 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-08T23:04:16.632 
INFO:teuthology.orchestra.run.vm00.stderr:Bootstrap complete. 2026-03-08T23:04:16.672 INFO:tasks.cephadm:Fetching config... 2026-03-08T23:04:16.672 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-08T23:04:16.672 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-08T23:04:16.692 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-03-08T23:04:16.692 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-08T23:04:16.692 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-08T23:04:16.756 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-08T23:04:16.757 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-08T23:04:16.757 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/keyring of=/dev/stdout 2026-03-08T23:04:16.827 INFO:tasks.cephadm:Fetching pub ssh key... 2026-03-08T23:04:16.827 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-08T23:04:16.827 DEBUG:teuthology.orchestra.run.vm00:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-08T23:04:16.887 INFO:tasks.cephadm:Installing pub ssh key for root users... 2026-03-08T23:04:16.887 DEBUG:teuthology.orchestra.run.vm00:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDW127BEIcdFEF3oW9ilN/Kpk/YNv6Xdw2WYdHrmEzly+J8h4SN1jMF1217HU9UXhtqn7+9KpB8HB/V/MxidbRii/nyJ7+xNvwQAmPykCP+r739szXphGUvv71HYz9BVwqGBMdm7fpkyYhyhIZtPpAbbvbu9+zQwdwal1ZWqIsgm1BXACTODbU4TV70ZKaX7HXPkMFBc4GQ2Oe0PMJqtgsytzdk0aZYTNsBqvtuFxtM4N7V7DCvHEiYNgyT4AH+CZIFO6hKdOCAEcUpXP8tycn6m7pfIcwChhSLpndSUivp3DrbQ5MQFRfXnsF308R3UuzuzugrYwQf81vvSjBEBy5OzZXYS2FB+E6naXnqFWrczVQZeDRQAhRJbWlhF/esbQJaL3OA8xWp2kj5ZqwtA5RLuam+KjoE04wLYJo/lHYFvAtpOHSdhZCFESibEGUxtqx+Ru6YPgD88yPdpXzwOoDzlZrrgZCNL8XEfnSXCck9Wnxyk3L9SI1MNuJq625dNI0= ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-08T23:04:16.973 INFO:teuthology.orchestra.run.vm00.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDW127BEIcdFEF3oW9ilN/Kpk/YNv6Xdw2WYdHrmEzly+J8h4SN1jMF1217HU9UXhtqn7+9KpB8HB/V/MxidbRii/nyJ7+xNvwQAmPykCP+r739szXphGUvv71HYz9BVwqGBMdm7fpkyYhyhIZtPpAbbvbu9+zQwdwal1ZWqIsgm1BXACTODbU4TV70ZKaX7HXPkMFBc4GQ2Oe0PMJqtgsytzdk0aZYTNsBqvtuFxtM4N7V7DCvHEiYNgyT4AH+CZIFO6hKdOCAEcUpXP8tycn6m7pfIcwChhSLpndSUivp3DrbQ5MQFRfXnsF308R3UuzuzugrYwQf81vvSjBEBy5OzZXYS2FB+E6naXnqFWrczVQZeDRQAhRJbWlhF/esbQJaL3OA8xWp2kj5ZqwtA5RLuam+KjoE04wLYJo/lHYFvAtpOHSdhZCFESibEGUxtqx+Ru6YPgD88yPdpXzwOoDzlZrrgZCNL8XEfnSXCck9Wnxyk3L9SI1MNuJq625dNI0= ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:04:16.984 DEBUG:teuthology.orchestra.run.vm08:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDW127BEIcdFEF3oW9ilN/Kpk/YNv6Xdw2WYdHrmEzly+J8h4SN1jMF1217HU9UXhtqn7+9KpB8HB/V/MxidbRii/nyJ7+xNvwQAmPykCP+r739szXphGUvv71HYz9BVwqGBMdm7fpkyYhyhIZtPpAbbvbu9+zQwdwal1ZWqIsgm1BXACTODbU4TV70ZKaX7HXPkMFBc4GQ2Oe0PMJqtgsytzdk0aZYTNsBqvtuFxtM4N7V7DCvHEiYNgyT4AH+CZIFO6hKdOCAEcUpXP8tycn6m7pfIcwChhSLpndSUivp3DrbQ5MQFRfXnsF308R3UuzuzugrYwQf81vvSjBEBy5OzZXYS2FB+E6naXnqFWrczVQZeDRQAhRJbWlhF/esbQJaL3OA8xWp2kj5ZqwtA5RLuam+KjoE04wLYJo/lHYFvAtpOHSdhZCFESibEGUxtqx+Ru6YPgD88yPdpXzwOoDzlZrrgZCNL8XEfnSXCck9Wnxyk3L9SI1MNuJq625dNI0= ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-08T23:04:17.019 INFO:teuthology.orchestra.run.vm08.stdout:ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDW127BEIcdFEF3oW9ilN/Kpk/YNv6Xdw2WYdHrmEzly+J8h4SN1jMF1217HU9UXhtqn7+9KpB8HB/V/MxidbRii/nyJ7+xNvwQAmPykCP+r739szXphGUvv71HYz9BVwqGBMdm7fpkyYhyhIZtPpAbbvbu9+zQwdwal1ZWqIsgm1BXACTODbU4TV70ZKaX7HXPkMFBc4GQ2Oe0PMJqtgsytzdk0aZYTNsBqvtuFxtM4N7V7DCvHEiYNgyT4AH+CZIFO6hKdOCAEcUpXP8tycn6m7pfIcwChhSLpndSUivp3DrbQ5MQFRfXnsF308R3UuzuzugrYwQf81vvSjBEBy5OzZXYS2FB+E6naXnqFWrczVQZeDRQAhRJbWlhF/esbQJaL3OA8xWp2kj5ZqwtA5RLuam+KjoE04wLYJo/lHYFvAtpOHSdhZCFESibEGUxtqx+Ru6YPgD88yPdpXzwOoDzlZrrgZCNL8XEfnSXCck9Wnxyk3L9SI1MNuJq625dNI0= ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:04:17.031 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-08T23:04:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:16 vm00 ceph-mon[47668]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:16 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/1759416751' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-08T23:04:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:16 vm00 ceph-mon[47668]: mgrmap e12: vm00.pkgtpt(active, since 2s) 2026-03-08T23:04:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:16 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3179462837' entity='client.admin' 2026-03-08T23:04:17.663 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-08T23:04:17.663 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-08T23:04:18.381 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm08 2026-03-08T23:04:18.381 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-08T23:04:18.381 DEBUG:teuthology.orchestra.run.vm08:> dd of=/etc/ceph/ceph.conf 2026-03-08T23:04:18.398 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-08T23:04:18.398 DEBUG:teuthology.orchestra.run.vm08:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:04:18.453 INFO:tasks.cephadm:Adding host vm08 to orchestrator... 2026-03-08T23:04:18.454 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch host add vm08 2026-03-08T23:04:18.620 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:18 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/3332212027' entity='client.admin' 2026-03-08T23:04:18.620 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:18 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:18.621 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:18 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:04:18.621 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:18 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:18.621 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:18 vm00 ceph-mon[47668]: Deploying daemon alertmanager.vm00 on vm00 2026-03-08T23:04:18.621 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:18 vm00 ceph-mon[47668]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:18.621 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:18 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:19 vm00 ceph-mon[47668]: from='client.14188 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm08", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:19.997 INFO:teuthology.orchestra.run.vm00.stdout:Added host 'vm08' with addr '192.168.123.108' 2026-03-08T23:04:20.066 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch host ls --format=json 2026-03-08T23:04:20.645 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:04:20.645 INFO:teuthology.orchestra.run.vm00.stdout:[{"addr": "192.168.123.100", "hostname": "vm00", "labels": [], "status": ""}, {"addr": "192.168.123.108", "hostname": "vm08", "labels": [], "status": ""}] 2026-03-08T23:04:20.721 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-08T23:04:20.721 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd crush tunables default 2026-03-08T23:04:20.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:20 vm00 ceph-mon[47668]: mgrmap e13: vm00.pkgtpt(active, since 6s) 2026-03-08T23:04:20.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:20 vm00 ceph-mon[47668]: Deploying cephadm binary to vm08 2026-03-08T23:04:20.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:20 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:20.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:20 vm00 ceph-mon[47668]: Added host vm08 2026-03-08T23:04:21.883 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:21 vm00 ceph-mon[47668]: from='client.14190 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:04:22.682 INFO:teuthology.orchestra.run.vm00.stderr:adjusted tunables profile to default 2026-03-08T23:04:22.779 INFO:tasks.cephadm:Adding mon.vm00 on vm00 
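[editor's note: illustrative sketch, not part of the log] At this point the cephadm task has added vm08 with `ceph orch host add vm08` and confirmed it via `ceph orch host ls --format=json`. A minimal standalone Python sketch of that verification step is below; it is not the tasks.cephadm code, and the fsid, image, and paths are simply the values visible in this run's log.

    # Sketch: confirm the hosts added above appear in `ceph orch host ls --format=json`.
    import json
    import subprocess

    FSID = "cabe2722-1b42-11f1-9450-0d39870fd3ae"   # fsid from this run's log
    EXPECTED_HOSTS = {"vm00", "vm08"}               # hosts bootstrapped/added above

    def orch_host_names():
        # Same command the task issues on the bootstrap host.
        cmd = [
            "sudo", "/home/ubuntu/cephtest/cephadm",
            "--image", "quay.io/ceph/ceph:v17.2.0",
            "shell", "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", FSID, "--",
            "ceph", "orch", "host", "ls", "--format=json",
        ]
        out = subprocess.run(cmd, check=True, capture_output=True, text=True).stdout
        return {h["hostname"] for h in json.loads(out)}

    assert EXPECTED_HOSTS <= orch_host_names(), "not all hosts registered with the orchestrator"
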
2026-03-08T23:04:22.779 INFO:tasks.cephadm:Adding mon.vm08 on vm08 2026-03-08T23:04:22.779 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch apply mon '2;vm00:192.168.123.100=vm00;vm08:192.168.123.108=vm08' 2026-03-08T23:04:22.871 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:22 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3994610670' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-08T23:04:23.382 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled mon update... 2026-03-08T23:04:23.793 DEBUG:teuthology.orchestra.run.vm08:mon.vm08> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@mon.vm08.service 2026-03-08T23:04:23.795 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:23.795 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:23 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3994610670' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-08T23:04:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:23 vm00 ceph-mon[47668]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:04:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:23 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:23 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-08T23:04:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:23 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-08T23:04:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:23 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:04:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:23 vm00 ceph-mon[47668]: Deploying daemon crash.vm00 on vm00 2026-03-08T23:04:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:23 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:24.363 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:24.363 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: 
":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:24.366 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: from='client.14194 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm00:192.168.123.100=vm00;vm08:192.168.123.108=vm08", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: Saving service mon spec with placement vm00:192.168.123.100=vm00;vm08:192.168.123.108=vm08;count:2 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: Deploying daemon grafana.vm00 on vm00 2026-03-08T23:04:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:24 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/3902697399' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:25.436 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-08T23:04:25.437 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:25.954 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:25.954 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:25.957 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:26.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:26 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/2518089597' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:27.321 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:27.322 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:27.813 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:27.813 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:27.814 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:28.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:27 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/2045040345' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:28.878 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-08T23:04:28.878 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:29.420 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:29.420 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:29.422 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:29 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/1842971891' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:30.467 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:30.467 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:31.250 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:31.250 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:31.252 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:31 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/1784118867' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:32.347 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-08T23:04:32.347 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:33.287 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:33.287 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:33.289 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:34.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:33 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/2511236890' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:34.610 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:34.610 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:35.167 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:35.167 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:35.169 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:34 vm00 ceph-mon[47668]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:36.240 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:36.240 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:36.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:35 vm00 ceph-mon[47668]: from='client.? 
192.168.123.108:0/1950705285' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:36.778 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:36.778 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:36.779 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:36 vm00 ceph-mon[47668]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:36 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/4271773801' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:37.859 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:37.859 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:38.463 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:38.464 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:38.465 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:38 vm00 ceph-mon[47668]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:38 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/215990837' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:39.525 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-08T23:04:39.526 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:40.172 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:40.172 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:40.174 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:41.244 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:41.244 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:40 vm00 ceph-mon[47668]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:40 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/2709411197' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:41.772 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:41.772 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:41.774 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:42.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:41 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/2945099788' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:42.842 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-08T23:04:42.842 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:43.403 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:43.403 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:43.406 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:42 vm00 ceph-mon[47668]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:44.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:43 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/3380286994' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:44.580 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:44.580 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:45.223 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:45.223 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:45.226 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:44 vm00 ceph-mon[47668]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:46.313 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:46.313 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:46.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:46 vm00 ceph-mon[47668]: from='client.? 
192.168.123.108:0/3703633086' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:46.889 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:46.889 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:46.892 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:47 vm00 ceph-mon[47668]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:47 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/4168398357' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:48.022 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:48.022 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:48.723 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:48.723 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:48.724 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:49 vm00 ceph-mon[47668]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:49 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/127238244' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:49.965 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-08T23:04:49.966 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:50.538 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:50.538 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:50.540 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:51.604 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:51.604 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:51 vm00 ceph-mon[47668]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:51 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/1761005256' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:52.195 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:52.195 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:52.196 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:53.456 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:53.456 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:53 vm00 ceph-mon[47668]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:53 vm00 ceph-mon[47668]: from='client.? 
192.168.123.108:0/2343599576' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:54 vm00 ceph-mon[47668]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:55.655 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:55.655 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:55.657 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:56.101 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:55 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/3630648618' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:56.910 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:04:56.910 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:57.217 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:57 vm00 ceph-mon[47668]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:57.217 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:57 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:04:58.038 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:04:58.038 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:04:58.043 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:04:58.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:58 vm00 ceph-mon[47668]: Deploying daemon node-exporter.vm00 on vm00 2026-03-08T23:04:59.309 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-08T23:04:59.309 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:04:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:59 vm00 ceph-mon[47668]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:04:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:59 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/2769738719' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:04:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:04:59 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:00.068 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:05:00.068 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:05:00.070 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:05:01.142 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:05:01.142 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:05:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:00 vm00 ceph-mon[47668]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:00 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:00 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-08T23:05:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:00 vm00 ceph-mon[47668]: from='client.? 
192.168.123.108:0/3642326345' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:01.695 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:05:01.695 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:05:01.698 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:05:02.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:01 vm00 ceph-mon[47668]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-08T23:05:02.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:01 vm00 ceph-mon[47668]: mgrmap e14: vm00.pkgtpt(active, since 47s) 2026-03-08T23:05:02.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:01 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/3307839351' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:02.773 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:05:02.773 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:05:03.348 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:05:03.348 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:05:03.351 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:05:04.437 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:05:04.438 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:05:04.635 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:04 vm00 ceph-mon[47668]: from='client.? 
192.168.123.108:0/3923815422' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:05.147 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:05:05.147 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:05:05.149 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:05:05.555 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:05 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/3954680137' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:06.210 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:05:06.210 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:05:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: Active manager daemon vm00.pkgtpt restarted 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: Activating manager daemon vm00.pkgtpt 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: osdmap e5: 0 total, 0 up, 0 in 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: mgrmap e15: vm00.pkgtpt(active, starting, since 0.0548853s) 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr metadata", "who": "vm00.pkgtpt", "id": "vm00.pkgtpt"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: Manager daemon vm00.pkgtpt is now available 2026-03-08T23:05:06.430 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: [08/Mar/2026:23:05:05] ENGINE Bus STARTING 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: [08/Mar/2026:23:05:05] ENGINE Serving on https://192.168.123.100:7150 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: [08/Mar/2026:23:05:05] ENGINE Bus STARTED 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:06.758 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:05:06.758 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:05:06.760 
INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:05:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:07 vm00 ceph-mon[47668]: mgrmap e16: vm00.pkgtpt(active, since 1.06051s) 2026-03-08T23:05:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:07 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/498888443' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:05:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:07 vm00 ceph-mon[47668]: Updating vm00:/etc/ceph/ceph.conf 2026-03-08T23:05:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:07.826 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:05:07.826 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:05:08.421 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:05:08.421 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:05:08.428 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:08 vm00 ceph-mon[47668]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:08 vm00 ceph-mon[47668]: mgrmap e17: vm00.pkgtpt(active, since 2s) 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:05:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:08 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/1749807931' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:09.477 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-08T23:05:09.478 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:05:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:09 vm00 ceph-mon[47668]: Deploying daemon crash.vm08 on vm08 2026-03-08T23:05:10.054 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:05:10.054 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:03:45.431550Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:05:10.057 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-08T23:05:11.164 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
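[editor's note: illustrative sketch, not part of the log] cephadm is now deploying the vm08 daemons (crash.vm08, mgr.vm08, and the mon.vm08 already being followed via journalctl earlier in the log); each runs as a podman container under a per-fsid systemd unit on vm08. A hedged sketch of checking that the mon unit came up locally, using the unit name shown in the journalctl command above:

    # Sketch: check the per-fsid systemd unit for mon.vm08 on the vm08 host.
    import subprocess

    FSID = "cabe2722-1b42-11f1-9450-0d39870fd3ae"
    unit = f"ceph-{FSID}@mon.vm08.service"   # unit name as used in the journalctl -u line above

    state = subprocess.run(
        ["systemctl", "is-active", unit],
        capture_output=True, text=True,
    ).stdout.strip()
    print(f"{unit}: {state}")   # expected to reach 'active' once the container starts
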
2026-03-08T23:05:11.164 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mon dump -f json 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm08.fufswh", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm08.fufswh", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: Deploying daemon mgr.vm08.fufswh on vm08 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/161559841' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:05:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:11.602 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 systemd[1]: Starting Ceph mon.vm08 for cabe2722-1b42-11f1-9450-0d39870fd3ae... 2026-03-08T23:05:11.851 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 podman[56782]: 2026-03-08 23:05:11.845860244 +0000 UTC m=+0.064183563 container create 0afd4637552ff7a6bf57bc7f0bc1d30391b83b438f98c1597a765e6af250e319 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm08, distribution-scope=public, com.redhat.component=centos-stream-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_CLEAN=True, io.k8s.display-name=CentOS Stream 8, CEPH_POINT_RELEASE=-17.2.0, io.openshift.tags=base centos centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, ceph=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_BRANCH=HEAD, name=centos-stream, RELEASE=HEAD, architecture=x86_64, io.openshift.expose-services=, version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.buildah.version=1.19.8, vcs-type=git, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vendor=Red Hat, Inc., build-date=2022-05-03T08:36:31.336870) 2026-03-08T23:05:12.124 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 podman[56782]: 2026-03-08 23:05:11.817288579 +0000 UTC m=+0.035611898 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a 2026-03-08T23:05:12.124 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 podman[56782]: 2026-03-08 23:05:11.927591364 +0000 UTC m=+0.145914683 container init 0afd4637552ff7a6bf57bc7f0bc1d30391b83b438f98c1597a765e6af250e319 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm08, io.openshift.tags=base centos centos-stream, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, GIT_CLEAN=True, io.k8s.display-name=CentOS Stream 8, io.buildah.version=1.19.8, vendor=Red Hat, Inc., distribution-scope=public, GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, version=8, vcs-type=git, release=754, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, ceph=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-08T23:05:12.124 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 podman[56782]: 2026-03-08 23:05:11.932149378 +0000 UTC m=+0.150472697 container start 0afd4637552ff7a6bf57bc7f0bc1d30391b83b438f98c1597a765e6af250e319 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm08, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-type=git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_REPO=https://github.com/ceph/ceph-container.git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, architecture=x86_64, GIT_CLEAN=True, maintainer=Guillaume Abrioux , io.buildah.version=1.19.8, vendor=Red Hat, Inc., build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.expose-services=, version=8, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, release=754, GIT_BRANCH=HEAD, io.openshift.tags=base centos centos-stream, name=centos-stream, ceph=True, distribution-scope=public, com.redhat.component=centos-stream-container, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, RELEASE=HEAD) 2026-03-08T23:05:12.124 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 bash[56782]: 0afd4637552ff7a6bf57bc7f0bc1d30391b83b438f98c1597a765e6af250e319 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 systemd[1]: Started Ceph mon.vm08 for cabe2722-1b42-11f1-9450-0d39870fd3ae. 
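mon.vm08 is deployed here as a podman container under a systemd unit; the container name in the journal lines above follows the ceph-<fsid>-mon-<hostname> pattern. A minimal sketch, assuming that naming, of how one could confirm the container is up (the helper name is made up for this note):

    # Check that the mon container started above is running, filtering podman
    # by the ceph-<fsid>-mon-<hostname> name visible in the journal lines.
    # Illustrative only; not part of the test suite.
    import subprocess

    def mon_container_running(fsid, hostname):
        name = "ceph-{}-mon-{}".format(fsid, hostname)
        out = subprocess.check_output(
            ["sudo", "podman", "ps", "--filter", "name=" + name,
             "--format", "{{.Names}}"],
            text=True,
        )
        return name in out.split()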
2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: set uid:gid to 167:167 (ceph:ceph) 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable), process ceph-mon, pid 2 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: pidfile_write: ignore empty --pid-file 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: load: jerasure load: lrc 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: RocksDB version: 6.15.5 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Git sha rocksdb_build_git_sha:@0@ 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Compile date Apr 18 2022 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: DB SUMMARY 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: DB Session ID: 130XM4BWLPMYZXEMZRRV 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: CURRENT file: CURRENT 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: IDENTITY file: IDENTITY 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: MANIFEST file: MANIFEST-000003 size: 57 Bytes 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm08/store.db dir, Total Num: 0, files: 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm08/store.db: 000004.log size: 511 ; 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.error_if_exists: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.create_if_missing: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.paranoid_checks: 1 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.env: 0x55a15ccc6860 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.fs: Posix File System 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.info_log: 0x55a15dd35ee0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_file_opening_threads: 16 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.statistics: (nil) 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: 
Options.use_fsync: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_log_file_size: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.keep_log_file_num: 1000 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.recycle_log_file_num: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.allow_fallocate: 1 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.allow_mmap_reads: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.allow_mmap_writes: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.use_direct_reads: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.create_missing_column_families: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.db_log_dir: 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.wal_dir: /var/lib/ceph/mon/ceph-vm08/store.db 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.advise_random_on_open: 1 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.db_write_buffer_size: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.write_buffer_manager: 0x55a15de262a0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: 
Options.new_table_reader_for_compaction_inputs: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-08T23:05:12.125 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.rate_limiter: (nil) 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.wal_recovery_mode: 2 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.enable_thread_tracking: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.enable_pipelined_write: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.unordered_write: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.row_cache: None 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.wal_filter: None 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.allow_ingest_behind: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.preserve_deletes: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.two_write_queues: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.manual_wal_flush: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.atomic_flush: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: 
Options.log_readahead_size: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.best_efforts_recovery: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.allow_data_in_errors: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.db_host_id: __hostname__ 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_background_jobs: 2 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_background_compactions: -1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_subcompactions: 1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_total_wal_size: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_open_files: -1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bytes_per_sync: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_readahead_size: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_background_flushes: -1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 
23:05:11 vm08 ceph-mon[56824]: rocksdb: Compression algorithms supported: 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: kZSTD supported: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: kXpressCompression supported: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: kLZ4HCCompression supported: 1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: kLZ4Compression supported: 1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: kBZip2Compression supported: 0 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: kZlibCompression supported: 1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: kSnappyCompression supported: 1 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: [db/version_set.cc:4725] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm08/store.db/MANIFEST-000003 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: [db/column_family.cc:597] --------------- Options for column family [default]: 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.merge_operator: 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_filter: None 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_filter_factory: None 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.sst_partitioner_factory: None 2026-03-08T23:05:12.126 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55a15dd01d00) 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: cache_index_and_filter_blocks: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: pin_top_level_index_and_filter: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: 
index_type: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: data_block_index_type: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: index_shortening: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: hash_index_allow_collision: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: checksum: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: no_block_cache: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_cache: 0x55a15dd6c170 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_cache_name: BinnedLRUCache 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_cache_options: 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: capacity : 536870912 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: num_shard_bits : 4 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: strict_capacity_limit : 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: high_pri_pool_ratio: 0.000 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_cache_compressed: (nil) 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: persistent_cache: (nil) 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_size: 4096 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_size_deviation: 10 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_restart_interval: 16 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: index_block_restart_interval: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: metadata_block_size: 4096 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: partition_filters: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: use_delta_encoding: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: filter_policy: rocksdb.BuiltinBloomFilter 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: whole_key_filtering: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: verify_compression: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: read_amp_bytes_per_bit: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: format_version: 4 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: enable_index_compression: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_align: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.write_buffer_size: 33554432 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_write_buffer_number: 2 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compression: NoCompression 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bottommost_compression: Disabled 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.prefix_extractor: nullptr 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: 
Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.num_levels: 7 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compression_opts.level: 32767 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compression_opts.strategy: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compression_opts.enabled: false 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-08T23:05:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.target_file_size_base: 67108864 
2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.arena_block_size: 4194304 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.rate_limit_delay_max_milliseconds: 100 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.disable_auto_compactions: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 
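The options dumped above report max_bytes_for_level_base = 268435456 (256 MiB), max_bytes_for_level_multiplier = 10 and num_levels = 7. Under RocksDB's static level sizing, level L (L >= 1) nominally holds base * multiplier^(L-1); since level_compaction_dynamic_level_bytes is 1 here, the real targets are derived from the bottom level's size, so the following is only the nominal calculation:

    # Nominal per-level capacities implied by the options dumped above.
    # level_compaction_dynamic_level_bytes=1 means RocksDB actually sizes
    # levels dynamically from the last level, so this is illustrative only.
    base = 268435456      # max_bytes_for_level_base, 256 MiB
    multiplier = 10       # max_bytes_for_level_multiplier
    for level in range(1, 7):
        print("L%d: %.2f GiB" % (level, base * multiplier ** (level - 1) / 2**30))
    # L1: 0.25 GiB, L2: 2.50 GiB, L3: 25.00 GiB, L4: 250.00 GiB, ...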
2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.table_properties_collectors: 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.inplace_update_support: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.bloom_locality: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.max_successive_merges: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.paranoid_file_checks: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.force_consistency_checks: 1 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.report_bg_io_stats: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.ttl: 2592000 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.enable_blob_files: false 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.min_blob_size: 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: 
Options.blob_file_size: 268435456 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: [db/version_set.cc:4773] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm08/store.db/MANIFEST-000003 succeeded,manifest_file_number is 3, next_file_number is 5, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: [db/version_set.cc:4782] Column family [default] (ID 0), log number is 0 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: [db/version_set.cc:4083] Creating manifest 7 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773011111997972, "job": 1, "event": "recovery_started", "wal_files": [4]} 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: [db/db_impl/db_impl_open.cc:847] Recovering log #4 mode 2 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:11 vm08 ceph-mon[56824]: rocksdb: [table/block_based/filter_policy.cc:996] Using legacy Bloom filter with high (20) bits/key. Dramatic filter space and/or accuracy improvement is available with format_version>=5. 
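The last journal line above notes a legacy Bloom filter at 20 bits per key. With an optimal number of hash functions, the textbook approximation for a Bloom filter's false-positive rate is roughly 0.6185^(bits/key); a quick worked check for 20 bits/key (theoretical only, the legacy RocksDB filter deviates somewhat, which is why the log points at format_version >= 5):

    # Back-of-the-envelope false-positive rate for a 20 bits/key Bloom filter,
    # using p ~= 0.6185 ** bits_per_key. Illustrative theory, not a RocksDB
    # measurement.
    bits_per_key = 20
    fpr = 0.6185 ** bits_per_key
    print("%.2e" % fpr)   # ~6.7e-05, i.e. about 0.007 %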
2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773011112012183, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1540, "file_checksum": "", "file_checksum_func_name": "Unknown", "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1773011111, "oldest_key_time": 0, "file_creation_time": 0, "db_id": "2676bb5c-ebfc-4f36-a9e1-4ab4b121d64c", "db_session_id": "130XM4BWLPMYZXEMZRRV"}} 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: rocksdb: [db/version_set.cc:4083] Creating manifest 9 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773011112013766, "job": 1, "event": "recovery_finished"} 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: rocksdb: [file/delete_scheduler.cc:73] Deleted file /var/lib/ceph/mon/ceph-vm08/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: rocksdb: [db/db_impl/db_impl_open.cc:1701] SstFileManager instance 0x55a15dd52700 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: rocksdb: DB pointer 0x55a15ddc6000 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: rocksdb: [db/db_impl/db_impl.cc:902] ------- DUMPING STATS ------- 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: rocksdb: [db/db_impl/db_impl.cc:903] 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** DB Stats ** 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-08T23:05:12.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 
MB/s 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** Compaction Stats [default] ** 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: L0 1/0 1.50 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.1 0.01 0.00 1 0.014 0 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Sum 1/0 1.50 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.1 0.01 0.00 1 0.014 0 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.1 0.01 0.00 1 0.014 0 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** Compaction Stats [default] ** 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.1 0.01 0.00 1 0.014 0 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Cumulative compaction: 0.00 GB write, 0.07 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Interval compaction: 0.00 GB write, 0.07 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: 
2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** Compaction Stats [default] ** 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: L0 1/0 1.50 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.1 0.01 0.00 1 0.014 0 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Sum 1/0 1.50 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.1 0.01 0.00 1 0.014 0 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** Compaction Stats [default] ** 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.1 0.01 0.00 1 0.014 0 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Cumulative compaction: 0.00 GB write, 0.07 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08 does not exist in monmap, will attempt to join an existing cluster 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 
23:05:12 vm08 ceph-mon[56824]: using public_addr v2:192.168.123.108:0/0 -> [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: starting mon.vm08 rank -1 at public addrs [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] at bind addrs [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon_data /var/lib/ceph/mon/ceph-vm08 fsid cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(???) e0 preinit fsid cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: Deploying daemon node-exporter.vm00 on vm00 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/2769738719' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).mds e1 new map 2026-03-08T23:05:12.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).mds e1 print_map 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout: e1 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout: legacy client fscid: -1 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout: 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout: No filesystems configured 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in 2026-03-08T23:05:12.130 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e5 e5: 0 total, 0 up, 0 in 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e5 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mon.vm08@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/3642326345' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14162 192.168.123.100:0/2750115164' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mgrmap e14: vm00.pkgtpt(active, since 47s) 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/3307839351' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/3923815422' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='client.? 
192.168.123.108:0/3954680137' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: Active manager daemon vm00.pkgtpt restarted 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: Activating manager daemon vm00.pkgtpt 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: osdmap e5: 0 total, 0 up, 0 in 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mgrmap e15: vm00.pkgtpt(active, starting, since 0.0548853s) 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr metadata", "who": "vm00.pkgtpt", "id": "vm00.pkgtpt"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: Manager daemon vm00.pkgtpt is now available 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: [08/Mar/2026:23:05:05] ENGINE Bus STARTING 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: [08/Mar/2026:23:05:05] ENGINE Serving on https://192.168.123.100:7150 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: [08/Mar/2026:23:05:05] ENGINE Bus STARTED 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mgrmap e16: vm00.pkgtpt(active, since 1.06051s) 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/498888443' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: Updating vm00:/etc/ceph/ceph.conf 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: mgrmap e17: vm00.pkgtpt(active, since 2s) 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:05:12.131 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/1749807931' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: Deploying daemon crash.vm08 on vm08 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm08.fufswh", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm08.fufswh", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: Deploying daemon mgr.vm08.fufswh on vm08 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='client.? 
192.168.123.108:0/161559841' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:12.131 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:12 vm08 ceph-mon[56824]: Deploying daemon mon.vm08 on vm08 2026-03-08T23:05:12.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:11 vm00 ceph-mon[47668]: Deploying daemon mon.vm08 on vm08 2026-03-08T23:05:17.091 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:05:17.091 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":2,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","modified":"2026-03-08T23:05:12.074633Z","created":"2026-03-08T23:03:45.431550Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm08","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:3300","nonce":0},{"type":"v1","addr":"192.168.123.108:6789","nonce":0}]},"addr":"192.168.123.108:6789/0","public_addr":"192.168.123.108:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-08T23:05:17.092 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 2 2026-03-08T23:05:17.150 INFO:tasks.cephadm:Generating final ceph.conf file... 
2026-03-08T23:05:17.150 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph config generate-minimal-conf 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: Deploying daemon node-exporter.vm08 on vm08 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: mon.vm00 calling monitor election 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/1964183081' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: mon.vm08 calling monitor election 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.? 
192.168.123.108:0/2828873815' entity='mgr.vm08.fufswh' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm08.fufswh/crt"}]: dispatch 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: mon.vm00 is new leader, mons vm00,vm08 in quorum (ranks 0,1) 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: monmap e2: 2 mons at {vm00=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],vm08=[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0]} 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: fsmap 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: osdmap e5: 0 total, 0 up, 0 in 2026-03-08T23:05:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: mgrmap e17: vm00.pkgtpt(active, since 11s) 2026-03-08T23:05:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: Standby manager daemon vm08.fufswh started 2026-03-08T23:05:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: overall HEALTH_OK 2026-03-08T23:05:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.? 192.168.123.108:0/2828873815' entity='mgr.vm08.fufswh' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-08T23:05:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.? 192.168.123.108:0/2828873815' entity='mgr.vm08.fufswh' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm08.fufswh/key"}]: dispatch 2026-03-08T23:05:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:17 vm08 ceph-mon[56824]: from='mgr.? 
192.168.123.108:0/2828873815' entity='mgr.vm08.fufswh' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: Deploying daemon node-exporter.vm08 on vm08 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: mon.vm00 calling monitor election 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/1964183081' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: mon.vm08 calling monitor election 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.? 
192.168.123.108:0/2828873815' entity='mgr.vm08.fufswh' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm08.fufswh/crt"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:17.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: mon.vm00 is new leader, mons vm00,vm08 in quorum (ranks 0,1) 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: monmap e2: 2 mons at {vm00=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],vm08=[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0]} 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: fsmap 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: osdmap e5: 0 total, 0 up, 0 in 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: mgrmap e17: vm00.pkgtpt(active, since 11s) 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: Standby manager daemon vm08.fufswh started 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: overall HEALTH_OK 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.? 192.168.123.108:0/2828873815' entity='mgr.vm08.fufswh' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.? 192.168.123.108:0/2828873815' entity='mgr.vm08.fufswh' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm08.fufswh/key"}]: dispatch 2026-03-08T23:05:17.388 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:17 vm00 ceph-mon[47668]: from='mgr.? 192.168.123.108:0/2828873815' entity='mgr.vm08.fufswh' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-08T23:05:17.652 INFO:teuthology.orchestra.run.vm00.stdout:# minimal ceph.conf for cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:05:17.652 INFO:teuthology.orchestra.run.vm00.stdout:[global] 2026-03-08T23:05:17.652 INFO:teuthology.orchestra.run.vm00.stdout: fsid = cabe2722-1b42-11f1-9450-0d39870fd3ae 2026-03-08T23:05:17.652 INFO:teuthology.orchestra.run.vm00.stdout: mon_host = [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] 2026-03-08T23:05:17.705 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 
2026-03-08T23:05:17.705 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-08T23:05:17.706 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.conf 2026-03-08T23:05:17.735 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-08T23:05:17.735 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:05:17.801 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-08T23:05:17.801 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.conf 2026-03-08T23:05:17.828 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-08T23:05:17.828 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:05:17.898 INFO:tasks.cephadm:Deploying OSDs... 2026-03-08T23:05:17.899 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-08T23:05:17.899 DEBUG:teuthology.orchestra.run.vm00:> dd if=/scratch_devs of=/dev/stdout 2026-03-08T23:05:17.915 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T23:05:17.915 DEBUG:teuthology.orchestra.run.vm00:> ls /dev/[sv]d? 2026-03-08T23:05:17.975 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vda 2026-03-08T23:05:17.975 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdb 2026-03-08T23:05:17.975 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdc 2026-03-08T23:05:17.975 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdd 2026-03-08T23:05:17.975 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vde 2026-03-08T23:05:17.975 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-08T23:05:17.975 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-08T23:05:17.975 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdb 2026-03-08T23:05:18.034 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdb 2026-03-08T23:05:18.034 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:05:18.034 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-08T23:05:18.034 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:05:18.034 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:05:18.034 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-08 23:04:18.165339084 +0000 2026-03-08T23:05:18.034 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-08 23:04:17.896338911 +0000 2026-03-08T23:05:18.034 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-08 23:04:17.896338911 +0000 2026-03-08T23:05:18.034 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-08 22:57:53.263000000 +0000 2026-03-08T23:05:18.034 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-08T23:05:18.100 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-08T23:05:18.101 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-08T23:05:18.101 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000213469 s, 2.4 MB/s 2026-03-08T23:05:18.102 DEBUG:teuthology.orchestra.run.vm00:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-08T23:05:18.163 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdc 2026-03-08T23:05:18.224 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdc 2026-03-08T23:05:18.224 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:05:18.224 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-08T23:05:18.224 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:05:18.224 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:05:18.224 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-08 23:04:18.223339122 +0000 2026-03-08T23:05:18.224 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-08 23:04:17.899338913 +0000 2026-03-08T23:05:18.224 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-08 23:04:17.899338913 +0000 2026-03-08T23:05:18.224 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-08 22:57:53.269000000 +0000 2026-03-08T23:05:18.224 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-08T23:05:18.289 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-08T23:05:18.289 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-08T23:05:18.289 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000221043 s, 2.3 MB/s 2026-03-08T23:05:18.290 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-08T23:05:18.349 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdd 2026-03-08T23:05:18.409 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdd 2026-03-08T23:05:18.409 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:05:18.409 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-08T23:05:18.409 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:05:18.409 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:05:18.409 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-08 23:04:18.277339156 +0000 2026-03-08T23:05:18.409 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-08 23:04:17.902338915 +0000 2026-03-08T23:05:18.409 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-08 23:04:17.902338915 +0000 2026-03-08T23:05:18.409 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-08 22:57:53.272000000 +0000 2026-03-08T23:05:18.409 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-08T23:05:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:18 vm00 ceph-mon[47668]: Deploying daemon prometheus.vm00 on vm00 2026-03-08T23:05:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:18 vm00 ceph-mon[47668]: mgrmap e18: vm00.pkgtpt(active, since 11s), standbys: vm08.fufswh 2026-03-08T23:05:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr metadata", "who": "vm08.fufswh", "id": "vm08.fufswh"}]: dispatch 2026-03-08T23:05:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:18 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/1239188995' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:18.452 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-08T23:05:18.452 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-08T23:05:18.452 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000182391 s, 2.8 MB/s 2026-03-08T23:05:18.453 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-08T23:05:18.510 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vde 2026-03-08T23:05:18.569 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vde 2026-03-08T23:05:18.569 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:05:18.569 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-08T23:05:18.570 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:05:18.570 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:05:18.570 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-08 23:04:18.333339192 +0000 2026-03-08T23:05:18.570 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-08 23:04:17.899338913 +0000 2026-03-08T23:05:18.570 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-08 23:04:17.899338913 +0000 2026-03-08T23:05:18.570 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-08 22:57:53.286000000 +0000 2026-03-08T23:05:18.570 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-08T23:05:18.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:18 vm08 ceph-mon[56824]: Deploying daemon prometheus.vm00 on vm00 2026-03-08T23:05:18.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:18 vm08 ceph-mon[56824]: mgrmap e18: vm00.pkgtpt(active, since 11s), standbys: vm08.fufswh 2026-03-08T23:05:18.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr metadata", "who": "vm08.fufswh", "id": "vm08.fufswh"}]: dispatch 2026-03-08T23:05:18.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:18 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/1239188995' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:18.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:18.634 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-08T23:05:18.634 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-08T23:05:18.634 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000181789 s, 2.8 MB/s 2026-03-08T23:05:18.635 DEBUG:teuthology.orchestra.run.vm00:> ! 
mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-08T23:05:18.697 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-08T23:05:18.697 DEBUG:teuthology.orchestra.run.vm08:> dd if=/scratch_devs of=/dev/stdout 2026-03-08T23:05:18.713 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T23:05:18.713 DEBUG:teuthology.orchestra.run.vm08:> ls /dev/[sv]d? 2026-03-08T23:05:18.770 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vda 2026-03-08T23:05:18.771 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdb 2026-03-08T23:05:18.771 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdc 2026-03-08T23:05:18.771 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdd 2026-03-08T23:05:18.771 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vde 2026-03-08T23:05:18.771 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-08T23:05:18.771 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-08T23:05:18.771 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdb 2026-03-08T23:05:18.830 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdb 2026-03-08T23:05:18.830 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:05:18.830 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 244 Links: 1 Device type: fc,10 2026-03-08T23:05:18.830 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:05:18.830 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:05:18.830 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-08 23:05:08.008781835 +0000 2026-03-08T23:05:18.830 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-08 23:05:07.685781770 +0000 2026-03-08T23:05:18.830 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-08 23:05:07.685781770 +0000 2026-03-08T23:05:18.830 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-08 22:58:24.246000000 +0000 2026-03-08T23:05:18.830 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-08T23:05:18.896 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-08T23:05:18.896 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-08T23:05:18.896 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000181179 s, 2.8 MB/s 2026-03-08T23:05:18.897 DEBUG:teuthology.orchestra.run.vm08:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-08T23:05:18.955 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdc 2026-03-08T23:05:19.017 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdc 2026-03-08T23:05:19.017 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:05:19.017 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-08T23:05:19.017 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:05:19.017 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:05:19.017 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-08 23:05:08.085781850 +0000 2026-03-08T23:05:19.017 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-08 23:05:07.687781770 +0000 2026-03-08T23:05:19.017 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-08 23:05:07.687781770 +0000 2026-03-08T23:05:19.017 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-08 22:58:24.254000000 +0000 2026-03-08T23:05:19.017 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-08T23:05:19.081 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-08T23:05:19.081 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-08T23:05:19.081 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000156142 s, 3.3 MB/s 2026-03-08T23:05:19.082 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-08T23:05:19.137 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdd 2026-03-08T23:05:19.194 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdd 2026-03-08T23:05:19.194 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:05:19.194 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-08T23:05:19.194 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:05:19.194 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:05:19.194 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-08 23:05:08.154781864 +0000 2026-03-08T23:05:19.194 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-08 23:05:07.692781771 +0000 2026-03-08T23:05:19.194 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-08 23:05:07.692781771 +0000 2026-03-08T23:05:19.194 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-08 22:58:24.260000000 +0000 2026-03-08T23:05:19.195 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-08T23:05:19.258 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-08T23:05:19.258 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-08T23:05:19.258 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000142155 s, 3.6 MB/s 2026-03-08T23:05:19.259 DEBUG:teuthology.orchestra.run.vm08:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-08T23:05:19.321 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vde 2026-03-08T23:05:19.381 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vde 2026-03-08T23:05:19.381 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:05:19.381 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-08T23:05:19.381 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:05:19.381 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:05:19.381 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-08 23:05:08.239781882 +0000 2026-03-08T23:05:19.381 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-08 23:05:07.695781772 +0000 2026-03-08T23:05:19.381 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-08 23:05:07.695781772 +0000 2026-03-08T23:05:19.381 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-08 22:58:24.263000000 +0000 2026-03-08T23:05:19.381 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-08T23:05:19.447 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-08T23:05:19.448 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-08T23:05:19.448 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000156071 s, 3.3 MB/s 2026-03-08T23:05:19.449 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-08T23:05:19.506 INFO:tasks.cephadm:Deploying osd.0 on vm00 with /dev/vde... 2026-03-08T23:05:19.506 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- lvm zap /dev/vde 2026-03-08T23:05:22.192 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:05:22.207 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch daemon add osd vm00:/dev/vde 2026-03-08T23:05:22.864 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:22.864 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:22.865 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:22.865 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:22.865 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:22.865 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
2026-03-08T23:05:22.865 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: Updating vm08:/etc/ceph/ceph.conf 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='client.14266 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:05:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:23 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: Updating vm08:/etc/ceph/ceph.conf 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='client.14266 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:05:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:23 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:24 vm08 ceph-mon[56824]: Updating vm00:/etc/ceph/ceph.conf 2026-03-08T23:05:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:24 vm08 ceph-mon[56824]: Reconfiguring mon.vm00 (unknown last config time)... 
2026-03-08T23:05:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:24 vm08 ceph-mon[56824]: Reconfiguring daemon mon.vm00 on vm00 2026-03-08T23:05:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:24 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/3594022007' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4f19378e-5738-4989-8fed-c2f3af8313ea"}]: dispatch 2026-03-08T23:05:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:24 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/3594022007' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4f19378e-5738-4989-8fed-c2f3af8313ea"}]': finished 2026-03-08T23:05:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:24 vm08 ceph-mon[56824]: osdmap e6: 1 total, 0 up, 1 in 2026-03-08T23:05:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:24 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:24 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/4100308579' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:05:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:24 vm00 ceph-mon[47668]: Updating vm00:/etc/ceph/ceph.conf 2026-03-08T23:05:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:24 vm00 ceph-mon[47668]: Reconfiguring mon.vm00 (unknown last config time)... 2026-03-08T23:05:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:24 vm00 ceph-mon[47668]: Reconfiguring daemon mon.vm00 on vm00 2026-03-08T23:05:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:24 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3594022007' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4f19378e-5738-4989-8fed-c2f3af8313ea"}]: dispatch 2026-03-08T23:05:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:24 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3594022007' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4f19378e-5738-4989-8fed-c2f3af8313ea"}]': finished 2026-03-08T23:05:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:24 vm00 ceph-mon[47668]: osdmap e6: 1 total, 0 up, 1 in 2026-03-08T23:05:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:24 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:24 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/4100308579' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:05:27.027 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:26 vm00 ceph-mon[47668]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:26 vm08 ceph-mon[56824]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: Reconfiguring mgr.vm00.pkgtpt (unknown last config time)... 
2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm00.pkgtpt", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: Reconfiguring daemon mgr.vm00.pkgtpt on vm00 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: Deploying daemon osd.0 on vm00 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: Reconfiguring alertmanager.vm00 (dependencies changed)... 2026-03-08T23:05:28.493 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:28 vm00 ceph-mon[47668]: Reconfiguring daemon alertmanager.vm00 on vm00 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: Reconfiguring mgr.vm00.pkgtpt (unknown last config time)... 
2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm00.pkgtpt", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: Reconfiguring daemon mgr.vm00.pkgtpt on vm00 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: Deploying daemon osd.0 on vm00 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: Reconfiguring alertmanager.vm00 (dependencies changed)... 2026-03-08T23:05:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:28 vm08 ceph-mon[56824]: Reconfiguring daemon alertmanager.vm00 on vm00 2026-03-08T23:05:30.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:30 vm00 ceph-mon[47668]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:30.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:30.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:30.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-08T23:05:30.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:30.762 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 0 on host 'vm00' 2026-03-08T23:05:30.852 DEBUG:teuthology.orchestra.run.vm00:osd.0> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@osd.0.service 2026-03-08T23:05:30.854 INFO:tasks.cephadm:Deploying osd.1 on vm00 with /dev/vdd... 
2026-03-08T23:05:30.855 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- lvm zap /dev/vdd 2026-03-08T23:05:30.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:30 vm08 ceph-mon[56824]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:30.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:30.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:30.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-08T23:05:30.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:31.266 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 08 23:05:31 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-0[57261]: 2026-03-08T23:05:31.118+0000 7fa218d6b3c0 -1 osd.0 0 log_to_monitors true 2026-03-08T23:05:32.041 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:05:32.064 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:31 vm00 ceph-mon[47668]: Reconfiguring crash.vm00 (monmap changed)... 2026-03-08T23:05:32.064 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:31 vm00 ceph-mon[47668]: Reconfiguring daemon crash.vm00 on vm00 2026-03-08T23:05:32.064 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:31 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:32.064 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:31 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:32.064 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:31 vm00 ceph-mon[47668]: from='osd.0 [v2:192.168.123.100:6802/3982901667,v1:192.168.123.100:6803/3982901667]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-08T23:05:32.064 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:31 vm00 ceph-mon[47668]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-08T23:05:32.069 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch daemon add osd vm00:/dev/vdd 2026-03-08T23:05:32.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: Reconfiguring crash.vm00 (monmap changed)... 
2026-03-08T23:05:32.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: Reconfiguring daemon crash.vm00 on vm00 2026-03-08T23:05:32.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:32.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:32.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='osd.0 [v2:192.168.123.100:6802/3982901667,v1:192.168.123.100:6803/3982901667]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-08T23:05:32.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-08T23:05:32.951 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: Reconfiguring grafana.vm00 (dependencies changed)... 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: Reconfiguring grafana.vm00 (dependencies changed)... 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: Reconfiguring daemon grafana.vm00 on vm00 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: osdmap e7: 1 total, 0 up, 1 in 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='osd.0 [v2:192.168.123.100:6802/3982901667,v1:192.168.123.100:6803/3982901667]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-08T23:05:33.180 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm08.fufswh", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-08T23:05:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:33.181 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 08 23:05:33 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-0[57261]: 2026-03-08T23:05:33.070+0000 7fa20f76e700 -1 osd.0 0 waiting for initial osdmap 2026-03-08T23:05:33.181 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 08 23:05:33 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-0[57261]: 2026-03-08T23:05:33.074+0000 7fa209904700 -1 osd.0 8 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:05:33.207 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: Reconfiguring daemon grafana.vm00 on vm00 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: osdmap e7: 1 total, 0 up, 1 in 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='osd.0 [v2:192.168.123.100:6802/3982901667,v1:192.168.123.100:6803/3982901667]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, 
"weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm08.fufswh", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-08T23:05:33.208 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='client.14280 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: Reconfiguring crash.vm08 (monmap changed)... 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: Reconfiguring daemon crash.vm08 on vm08 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: Reconfiguring mgr.vm08.fufswh (monmap changed)... 
2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: Reconfiguring daemon mgr.vm08.fufswh on vm08 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: osdmap e8: 1 total, 0 up, 1 in 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/3551435980' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "623f8038-6c2f-49df-b1ca-6a03a815b1a6"}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: osd.0 [v2:192.168.123.100:6802/3982901667,v1:192.168.123.100:6803/3982901667] boot 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='client.? 
192.168.123.100:0/3551435980' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "623f8038-6c2f-49df-b1ca-6a03a815b1a6"}]': finished 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: osdmap e9: 2 total, 1 up, 2 in 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:33 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='client.14280 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: Reconfiguring crash.vm08 (monmap changed)... 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: Reconfiguring daemon crash.vm08 on vm08 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: Reconfiguring mgr.vm08.fufswh (monmap changed)... 
2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: Reconfiguring daemon mgr.vm08.fufswh on vm08 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: osdmap e8: 1 total, 0 up, 1 in 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3551435980' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "623f8038-6c2f-49df-b1ca-6a03a815b1a6"}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: osd.0 [v2:192.168.123.100:6802/3982901667,v1:192.168.123.100:6803/3982901667] boot 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/3551435980' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "623f8038-6c2f-49df-b1ca-6a03a815b1a6"}]': finished 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: osdmap e9: 2 total, 1 up, 2 in 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:34.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:33 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:05:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:35 vm08 ceph-mon[56824]: purged_snaps scrub starts 2026-03-08T23:05:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:35 vm08 ceph-mon[56824]: purged_snaps scrub ok 2026-03-08T23:05:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:35 vm08 ceph-mon[56824]: Reconfiguring mon.vm08 (monmap changed)... 2026-03-08T23:05:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:35 vm08 ceph-mon[56824]: Reconfiguring daemon mon.vm08 on vm08 2026-03-08T23:05:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:35 vm08 ceph-mon[56824]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:35 vm08 ceph-mon[56824]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:05:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:35 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/2716616019' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:05:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:35 vm00 ceph-mon[47668]: purged_snaps scrub starts 2026-03-08T23:05:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:35 vm00 ceph-mon[47668]: purged_snaps scrub ok 2026-03-08T23:05:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:35 vm00 ceph-mon[47668]: Reconfiguring mon.vm08 (monmap changed)... 2026-03-08T23:05:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:35 vm00 ceph-mon[47668]: Reconfiguring daemon mon.vm08 on vm08 2026-03-08T23:05:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:35 vm00 ceph-mon[47668]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:05:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:35 vm00 ceph-mon[47668]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:05:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:35 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/2716616019' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:05:36.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:36 vm08 ceph-mon[56824]: osdmap e10: 2 total, 1 up, 2 in 2026-03-08T23:05:36.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:36 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:36.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:36 vm08 ceph-mon[56824]: pgmap v13: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:36.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:36 vm00 ceph-mon[47668]: osdmap e10: 2 total, 1 up, 2 in 2026-03-08T23:05:36.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:36 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:36.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:36 vm00 ceph-mon[47668]: pgmap v13: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:37.681 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:37 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-08T23:05:37.681 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:37 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:37 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-08T23:05:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:37 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:38.860 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:38 vm00 ceph-mon[47668]: pgmap v14: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:38.860 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:38 vm00 ceph-mon[47668]: Deploying daemon osd.1 on vm00 2026-03-08T23:05:38.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:38 vm08 ceph-mon[56824]: pgmap v14: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:38.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:38 vm08 ceph-mon[56824]: Deploying daemon osd.1 on vm00 2026-03-08T23:05:39.797 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 1 on host 'vm00' 2026-03-08T23:05:39.904 DEBUG:teuthology.orchestra.run.vm00:osd.1> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@osd.1.service 2026-03-08T23:05:39.907 INFO:tasks.cephadm:Deploying osd.2 on vm00 with /dev/vdc... 
2026-03-08T23:05:39.907 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- lvm zap /dev/vdc 2026-03-08T23:05:40.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: pgmap v15: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.100:3000"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.100:3000"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.100:9095"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.100:9095"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:40.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: pgmap v15: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.100:3000"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.100:3000"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.100:9095"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.100:9095"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:40.171 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:40.716 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:05:40.731 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch daemon add osd vm00:/dev/vdc 2026-03-08T23:05:40.946 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 08 23:05:40 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-1[60100]: 2026-03-08T23:05:40.914+0000 7f48fe05d3c0 -1 osd.1 0 log_to_monitors true 2026-03-08T23:05:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:41 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:41 vm00 ceph-mon[47668]: from='osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:41 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:41 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:41 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:41 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:41 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:41 vm08 ceph-mon[56824]: from='osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:41 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:41 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:41 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:42.226 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:41 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:43.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:42 vm00 ceph-mon[47668]: from='client.14298 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:05:43.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:42 vm00 ceph-mon[47668]: pgmap v16: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:43.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:42 vm00 ceph-mon[47668]: from='osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-08T23:05:43.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:42 vm00 ceph-mon[47668]: osdmap e11: 2 total, 1 up, 2 in 2026-03-08T23:05:43.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:42 vm00 ceph-mon[47668]: from='osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:43.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:42 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:43.088 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 08 23:05:42 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-1[60100]: 2026-03-08T23:05:42.797+0000 7f48f4a60700 -1 osd.1 0 waiting for initial osdmap 2026-03-08T23:05:43.088 
INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 08 23:05:42 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-1[60100]: 2026-03-08T23:05:42.804+0000 7f48f03f9700 -1 osd.1 12 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:05:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:42 vm08 ceph-mon[56824]: from='client.14298 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:05:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:42 vm08 ceph-mon[56824]: pgmap v16: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:42 vm08 ceph-mon[56824]: from='osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-08T23:05:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:42 vm08 ceph-mon[56824]: osdmap e11: 2 total, 1 up, 2 in 2026-03-08T23:05:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:42 vm08 ceph-mon[56824]: from='osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:42 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:44.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:43 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/4063750005' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d43872a3-385b-40c9-8132-04eb471428b6"}]: dispatch 2026-03-08T23:05:44.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:43 vm08 ceph-mon[56824]: from='osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-08T23:05:44.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:43 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/4063750005' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d43872a3-385b-40c9-8132-04eb471428b6"}]': finished 2026-03-08T23:05:44.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:43 vm08 ceph-mon[56824]: osdmap e12: 3 total, 1 up, 3 in 2026-03-08T23:05:44.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:43 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:44.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:43 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:44.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:43 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:44.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:43 vm08 ceph-mon[56824]: from='client.? 
192.168.123.100:0/2796963305' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:05:44.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:43 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/4063750005' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d43872a3-385b-40c9-8132-04eb471428b6"}]: dispatch 2026-03-08T23:05:44.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:43 vm00 ceph-mon[47668]: from='osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-08T23:05:44.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:43 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/4063750005' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d43872a3-385b-40c9-8132-04eb471428b6"}]': finished 2026-03-08T23:05:44.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:43 vm00 ceph-mon[47668]: osdmap e12: 3 total, 1 up, 3 in 2026-03-08T23:05:44.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:43 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:44.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:43 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:44.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:43 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:44.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:43 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/2796963305' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:05:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:44 vm08 ceph-mon[56824]: purged_snaps scrub starts 2026-03-08T23:05:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:44 vm08 ceph-mon[56824]: purged_snaps scrub ok 2026-03-08T23:05:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:44 vm08 ceph-mon[56824]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:44 vm08 ceph-mon[56824]: osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816] boot 2026-03-08T23:05:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:44 vm08 ceph-mon[56824]: osdmap e13: 3 total, 2 up, 3 in 2026-03-08T23:05:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:44 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:44 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:44 vm00 ceph-mon[47668]: purged_snaps scrub starts 2026-03-08T23:05:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:44 vm00 ceph-mon[47668]: purged_snaps scrub ok 2026-03-08T23:05:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:44 vm00 ceph-mon[47668]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:05:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:44 vm00 ceph-mon[47668]: osd.1 [v2:192.168.123.100:6810/1974704816,v1:192.168.123.100:6811/1974704816] boot 2026-03-08T23:05:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:44 vm00 ceph-mon[47668]: osdmap e13: 3 total, 2 up, 3 in 2026-03-08T23:05:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:44 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:05:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:44 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:46.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:46 vm08 ceph-mon[56824]: osdmap e14: 3 total, 2 up, 3 in 2026-03-08T23:05:46.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:46 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:46.385 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:46 vm00 ceph-mon[47668]: osdmap e14: 3 total, 2 up, 3 in 2026-03-08T23:05:46.385 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:46 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:47 vm00 ceph-mon[47668]: pgmap v22: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:05:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:47 vm00 ceph-mon[47668]: Detected new or changed devices on vm00 
2026-03-08T23:05:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:47 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:47 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:05:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:47 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:47 vm08 ceph-mon[56824]: pgmap v22: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:05:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:47 vm08 ceph-mon[56824]: Detected new or changed devices on vm00 2026-03-08T23:05:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:47 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:47 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:05:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:47 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:48.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:48 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-08T23:05:48.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:48 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:48.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:48 vm00 ceph-mon[47668]: Deploying daemon osd.2 on vm00 2026-03-08T23:05:48.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:48 vm00 ceph-mon[47668]: pgmap v23: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:05:48.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:48 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-08T23:05:48.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:48 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:48.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:48 vm08 ceph-mon[56824]: Deploying daemon osd.2 on vm00 2026-03-08T23:05:48.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:48 vm08 ceph-mon[56824]: pgmap v23: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:05:50.336 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 2 on host 'vm00' 2026-03-08T23:05:50.405 DEBUG:teuthology.orchestra.run.vm00:osd.2> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@osd.2.service 2026-03-08T23:05:50.409 INFO:tasks.cephadm:Deploying osd.3 on vm00 with /dev/vdb... 
2026-03-08T23:05:50.409 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- lvm zap /dev/vdb 2026-03-08T23:05:50.447 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:50 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:50.447 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:50 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:50.447 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:50 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:50.447 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:50 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:50.447 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:50 vm00 ceph-mon[47668]: pgmap v24: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:05:50.447 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:50 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:50 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:50 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:50 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:50 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:50 vm08 ceph-mon[56824]: pgmap v24: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:05:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:50 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:51.040 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 08 23:05:50 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-2[63052]: 2026-03-08T23:05:50.930+0000 7ffa57dc53c0 -1 osd.2 0 log_to_monitors true 2026-03-08T23:05:51.199 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:05:51.212 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch daemon add osd vm00:/dev/vdb 2026-03-08T23:05:51.341 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:51 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:51.341 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:51 vm00 
ceph-mon[47668]: from='osd.2 [v2:192.168.123.100:6818/625312955,v1:192.168.123.100:6819/625312955]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-08T23:05:51.341 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:51 vm00 ceph-mon[47668]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-08T23:05:51.341 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:51 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:51 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:51 vm08 ceph-mon[56824]: from='osd.2 [v2:192.168.123.100:6818/625312955,v1:192.168.123.100:6819/625312955]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-08T23:05:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:51 vm08 ceph-mon[56824]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-08T23:05:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:51 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: osdmap e15: 3 total, 2 up, 3 in 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='osd.2 [v2:192.168.123.100:6818/625312955,v1:192.168.123.100:6819/625312955]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: pgmap v26: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='client.14312 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 
2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:52.417 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:52.418 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:52.418 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:52 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:52.418 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 08 23:05:52 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-2[63052]: 2026-03-08T23:05:52.351+0000 7ffa4ffcb700 -1 osd.2 0 waiting for initial osdmap 2026-03-08T23:05:52.418 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 08 23:05:52 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-2[63052]: 2026-03-08T23:05:52.364+0000 7ffa4b163700 -1 osd.2 16 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: osdmap e15: 3 total, 2 up, 3 in 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='osd.2 [v2:192.168.123.100:6818/625312955,v1:192.168.123.100:6819/625312955]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: pgmap v26: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='client.14312 -' entity='client.admin' 
cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:52.601 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:52 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:05:53.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: osdmap e16: 3 total, 2 up, 3 in 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/2455298858' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d76b6d00-93d6-4c3b-b7fb-0e4aabef6a84"}]: dispatch 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: osd.2 [v2:192.168.123.100:6818/625312955,v1:192.168.123.100:6819/625312955] boot 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/2455298858' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d76b6d00-93d6-4c3b-b7fb-0e4aabef6a84"}]': finished 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: osdmap e17: 4 total, 3 up, 4 in 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:05:53.532 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:53 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: osdmap e16: 3 total, 2 up, 3 in 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/2455298858' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d76b6d00-93d6-4c3b-b7fb-0e4aabef6a84"}]: dispatch 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: osd.2 [v2:192.168.123.100:6818/625312955,v1:192.168.123.100:6819/625312955] boot 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: from='client.? 
192.168.123.100:0/2455298858' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d76b6d00-93d6-4c3b-b7fb-0e4aabef6a84"}]': finished 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: osdmap e17: 4 total, 3 up, 4 in 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:53 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:54.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:54 vm00 ceph-mon[47668]: purged_snaps scrub starts 2026-03-08T23:05:54.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:54 vm00 ceph-mon[47668]: purged_snaps scrub ok 2026-03-08T23:05:54.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:54 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3373269039' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:05:54.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:54 vm00 ceph-mon[47668]: pgmap v29: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:05:54.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:54 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-08T23:05:54.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:54 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-08T23:05:54.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:54 vm00 ceph-mon[47668]: osdmap e18: 4 total, 3 up, 4 in 2026-03-08T23:05:54.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:54 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:05:54.514 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:54 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-08T23:05:54.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:54 vm08 ceph-mon[56824]: purged_snaps scrub starts 2026-03-08T23:05:54.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:54 vm08 ceph-mon[56824]: purged_snaps scrub ok 2026-03-08T23:05:54.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:54 vm08 ceph-mon[56824]: from='client.? 
192.168.123.100:0/3373269039' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:05:54.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:54 vm08 ceph-mon[56824]: pgmap v29: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:05:54.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:54 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-08T23:05:54.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:54 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-08T23:05:54.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:54 vm08 ceph-mon[56824]: osdmap e18: 4 total, 3 up, 4 in 2026-03-08T23:05:54.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:54 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:05:54.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:54 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-08T23:05:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:55 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:55 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-08T23:05:55.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:55 vm00 ceph-mon[47668]: osdmap e19: 4 total, 3 up, 4 in 2026-03-08T23:05:55.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:55 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:05:56.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:55 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:56.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:55 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-08T23:05:56.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:55 vm08 ceph-mon[56824]: osdmap e19: 4 total, 3 up, 4 in 2026-03-08T23:05:56.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:55 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:05:56.802 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:56 vm00 ceph-mon[47668]: pgmap v32: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:05:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:56 vm08 ceph-mon[56824]: pgmap v32: 1 pgs: 1 
creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:05:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:57 vm00 ceph-mon[47668]: osdmap e20: 4 total, 3 up, 4 in 2026-03-08T23:05:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:57 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:05:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:57 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:57 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:58.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:57 vm08 ceph-mon[56824]: osdmap e20: 4 total, 3 up, 4 in 2026-03-08T23:05:58.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:57 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:05:58.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:57 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:58.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:57 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:58.590 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66402]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-08T23:05:58.590 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66402]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:05:58.590 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66402]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:05:58.590 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66339]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-08T23:05:58.590 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66339]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:05:58.590 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66339]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:05:58.863 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:58 vm00 ceph-mon[47668]: Detected new or changed devices on vm00 2026-03-08T23:05:58.863 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:58 vm00 ceph-mon[47668]: pgmap v34: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:05:58.863 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:58 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-08T23:05:58.863 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:58 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:58.863 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:58 vm00 ceph-mon[47668]: Deploying daemon osd.3 on vm00 2026-03-08T23:05:58.863 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66439]: ceph : TTY=unknown ; PWD=/ ; USER=root ; 
COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-08T23:05:58.863 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66439]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:05:58.863 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66439]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:05:59.043 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:58 vm08 ceph-mon[56824]: Detected new or changed devices on vm00 2026-03-08T23:05:59.043 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:58 vm08 ceph-mon[56824]: pgmap v34: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:05:59.043 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:58 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-08T23:05:59.043 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:58 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:59.043 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:58 vm08 ceph-mon[56824]: Deploying daemon osd.3 on vm00 2026-03-08T23:05:59.044 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 sudo[58947]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-08T23:05:59.047 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 sudo[58947]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:05:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66481]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-08T23:05:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66481]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:05:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:58 vm00 sudo[66481]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:05:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 sudo[58947]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:05:59.875 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:05:59.875 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:05:59.875 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:05:59.875 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:05:59.875 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:05:59.875 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": 
"vm08"}]: dispatch 2026-03-08T23:05:59.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:05:59.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:05:59.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:05:59.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:05:59.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:05:59.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:05:59 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-08T23:06:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:05:59 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:00.521 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 3 on host 'vm00' 2026-03-08T23:06:00.592 DEBUG:teuthology.orchestra.run.vm00:osd.3> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@osd.3.service 2026-03-08T23:06:00.593 INFO:tasks.cephadm:Deploying osd.4 on vm08 with /dev/vde... 2026-03-08T23:06:00.594 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- lvm zap /dev/vde 2026-03-08T23:06:00.860 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:00 vm00 ceph-mon[47668]: pgmap v35: 1 pgs: 1 creating+peering; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:06:00.860 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:00 vm00 ceph-mon[47668]: mgrmap e19: vm00.pkgtpt(active, since 54s), standbys: vm08.fufswh 2026-03-08T23:06:00.860 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:00 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:00.869 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:00 vm08 ceph-mon[56824]: pgmap v35: 1 pgs: 1 creating+peering; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:06:00.869 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:00 vm08 ceph-mon[56824]: mgrmap e19: vm00.pkgtpt(active, since 54s), standbys: vm08.fufswh 2026-03-08T23:06:00.869 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:00 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:01.188 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:06:01.202 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch daemon add osd vm08:/dev/vde 2026-03-08T23:06:01.377 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 08 23:06:01 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-3[66605]: 2026-03-08T23:06:01.204+0000 7f7c809013c0 -1 osd.3 0 log_to_monitors true 2026-03-08T23:06:02.530 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:02.530 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:02 vm08 ceph-mon[56824]: from='osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-08T23:06:02.530 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:02 vm08 ceph-mon[56824]: pgmap v36: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:02 vm08 ceph-mon[56824]: from='client.24161 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:02 vm00 ceph-mon[47668]: from='osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:02 vm00 ceph-mon[47668]: pgmap v36: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:02 vm00 ceph-mon[47668]: from='client.24161 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:06:02.531 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:03.179 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 08 23:06:03 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-3[66605]: 2026-03-08T23:06:03.037+0000 7f7c77304700 -1 osd.3 0 waiting for initial osdmap 2026-03-08T23:06:03.179 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 08 23:06:03 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-3[66605]: 2026-03-08T23:06:03.055+0000 7f7c73c9f700 -1 osd.3 22 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:06:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-08T23:06:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: osdmap e21: 4 total, 3 up, 4 in 2026-03-08T23:06:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:06:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 
ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/1490643378' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bd987b29-9261-4c05-b392-d5fcc1540ab1"}]: dispatch 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bd987b29-9261-4c05-b392-d5fcc1540ab1"}]: dispatch 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bd987b29-9261-4c05-b392-d5fcc1540ab1"}]': finished 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: osdmap e22: 5 total, 3 up, 5 in 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:03 vm08 ceph-mon[56824]: from='client.? 
192.168.123.108:0/1076710333' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: osdmap e21: 4 total, 3 up, 4 in 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/1490643378' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bd987b29-9261-4c05-b392-d5fcc1540ab1"}]: dispatch 2026-03-08T23:06:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bd987b29-9261-4c05-b392-d5fcc1540ab1"}]: dispatch 2026-03-08T23:06:03.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-08T23:06:03.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bd987b29-9261-4c05-b392-d5fcc1540ab1"}]': finished 2026-03-08T23:06:03.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: osdmap e22: 5 total, 3 up, 5 in 2026-03-08T23:06:03.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:06:03.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:03.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:03 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/1076710333' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:06:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:04 vm08 ceph-mon[56824]: purged_snaps scrub starts 2026-03-08T23:06:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:04 vm08 ceph-mon[56824]: purged_snaps scrub ok 2026-03-08T23:06:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:04 vm08 ceph-mon[56824]: pgmap v39: 1 pgs: 1 active+clean; 449 KiB data, 18 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:06:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:04 vm08 ceph-mon[56824]: osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215] boot 2026-03-08T23:06:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:04 vm08 ceph-mon[56824]: osdmap e23: 5 total, 4 up, 5 in 2026-03-08T23:06:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:06:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:04 vm00 ceph-mon[47668]: purged_snaps scrub starts 2026-03-08T23:06:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:04 vm00 ceph-mon[47668]: purged_snaps scrub ok 2026-03-08T23:06:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:04 vm00 ceph-mon[47668]: pgmap v39: 1 pgs: 1 active+clean; 449 KiB data, 18 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:06:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:05.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:04 vm00 ceph-mon[47668]: osd.3 [v2:192.168.123.100:6826/3746026215,v1:192.168.123.100:6827/3746026215] boot 2026-03-08T23:06:05.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:04 
vm00 ceph-mon[47668]: osdmap e23: 5 total, 4 up, 5 in 2026-03-08T23:06:05.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:06:05.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:06 vm08 ceph-mon[56824]: osdmap e24: 5 total, 4 up, 5 in 2026-03-08T23:06:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:06 vm08 ceph-mon[56824]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:06:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:06:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:06:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:06 vm00 ceph-mon[47668]: osdmap e24: 5 total, 4 up, 5 in 2026-03-08T23:06:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:06 vm00 ceph-mon[47668]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:06:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:06:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:06:07.305 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:07 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-08T23:06:07.305 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:07 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:07.305 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:07 vm08 ceph-mon[56824]: Deploying daemon osd.4 on vm08 2026-03-08T23:06:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", 
"entity": "osd.4"}]: dispatch 2026-03-08T23:06:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:07 vm00 ceph-mon[47668]: Deploying daemon osd.4 on vm08 2026-03-08T23:06:08.259 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:08 vm08 ceph-mon[56824]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:06:08.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:08 vm00 ceph-mon[47668]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:06:09.016 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 4 on host 'vm08' 2026-03-08T23:06:09.068 DEBUG:teuthology.orchestra.run.vm08:osd.4> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@osd.4.service 2026-03-08T23:06:09.069 INFO:tasks.cephadm:Deploying osd.5 on vm08 with /dev/vdd... 2026-03-08T23:06:09.069 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- lvm zap /dev/vdd 2026-03-08T23:06:09.401 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:09.401 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:09.401 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:09.401 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:09.401 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:09.401 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:09.449 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:09.449 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:09.449 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:09.449 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:09.449 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:06:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:09.449 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:09.803 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:06:09.818 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch daemon add osd vm08:/dev/vdd 2026-03-08T23:06:10.185 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 08 23:06:10 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-4[60686]: 2026-03-08T23:06:10.055+0000 7f82c71563c0 -1 osd.4 0 log_to_monitors true 2026-03-08T23:06:10.580 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:10 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:10.580 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:10 vm08 ceph-mon[56824]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:06:10.580 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:10 vm08 ceph-mon[56824]: from='osd.4 [v2:192.168.123.108:6800/1320855616,v1:192.168.123.108:6801/1320855616]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:06:10.580 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:10 vm08 ceph-mon[56824]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:06:10.580 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:10 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:06:10.580 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:10 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:06:10.580 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:10 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:10.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:10.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:10 vm00 ceph-mon[47668]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:06:10.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:10 vm00 ceph-mon[47668]: from='osd.4 [v2:192.168.123.108:6800/1320855616,v1:192.168.123.108:6801/1320855616]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:06:10.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:10 vm00 ceph-mon[47668]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:06:10.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], 
"format": "json"}]: dispatch 2026-03-08T23:06:10.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:06:10.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:11.378 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 08 23:06:11 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-4[60686]: 2026-03-08T23:06:11.166+0000 7f82bdb59700 -1 osd.4 0 waiting for initial osdmap 2026-03-08T23:06:11.378 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 08 23:06:11 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-4[60686]: 2026-03-08T23:06:11.176+0000 7f82b7cef700 -1 osd.4 26 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='client.14352 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-08T23:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: osdmap e25: 5 total, 4 up, 5 in 2026-03-08T23:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='osd.4 [v2:192.168.123.108:6800/1320855616,v1:192.168.123.108:6801/1320855616]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: Detected new or changed devices on vm08 2026-03-08T23:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/3993700313' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bfe25307-0f2e-4a7c-bbba-a5f33748435b"}]: dispatch 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bfe25307-0f2e-4a7c-bbba-a5f33748435b"}]: dispatch 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bfe25307-0f2e-4a7c-bbba-a5f33748435b"}]': finished 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: osdmap e26: 6 total, 4 up, 6 in 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:11.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='client.14352 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: osdmap e25: 5 total, 4 up, 5 in 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='osd.4 [v2:192.168.123.108:6800/1320855616,v1:192.168.123.108:6801/1320855616]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:11.877 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: Detected new or changed devices on vm08 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/3993700313' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bfe25307-0f2e-4a7c-bbba-a5f33748435b"}]: dispatch 2026-03-08T23:06:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bfe25307-0f2e-4a7c-bbba-a5f33748435b"}]: dispatch 2026-03-08T23:06:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-08T23:06:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bfe25307-0f2e-4a7c-bbba-a5f33748435b"}]': finished 2026-03-08T23:06:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: osdmap e26: 6 total, 4 up, 6 in 2026-03-08T23:06:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:12.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:12 vm00 ceph-mon[47668]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:06:12.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:12 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/4052996489' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:06:12.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:12 vm00 ceph-mon[47668]: osd.4 [v2:192.168.123.108:6800/1320855616,v1:192.168.123.108:6801/1320855616] boot 2026-03-08T23:06:12.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:12 vm00 ceph-mon[47668]: osdmap e27: 6 total, 5 up, 6 in 2026-03-08T23:06:12.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:12.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:12.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:12 vm08 ceph-mon[56824]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:06:12.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:12 vm08 ceph-mon[56824]: from='client.? 
192.168.123.108:0/4052996489' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:06:12.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:12 vm08 ceph-mon[56824]: osd.4 [v2:192.168.123.108:6800/1320855616,v1:192.168.123.108:6801/1320855616] boot 2026-03-08T23:06:12.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:12 vm08 ceph-mon[56824]: osdmap e27: 6 total, 5 up, 6 in 2026-03-08T23:06:12.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:06:12.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:13 vm08 ceph-mon[56824]: purged_snaps scrub starts 2026-03-08T23:06:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:13 vm08 ceph-mon[56824]: purged_snaps scrub ok 2026-03-08T23:06:14.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:13 vm00 ceph-mon[47668]: purged_snaps scrub starts 2026-03-08T23:06:14.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:13 vm00 ceph-mon[47668]: purged_snaps scrub ok 2026-03-08T23:06:15.057 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:14 vm08 ceph-mon[56824]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-08T23:06:15.057 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:14 vm08 ceph-mon[56824]: osdmap e28: 6 total, 5 up, 6 in 2026-03-08T23:06:15.057 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:14 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:15.057 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:14 vm08 ceph-mon[56824]: osdmap e29: 6 total, 5 up, 6 in 2026-03-08T23:06:15.057 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:14 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:14 vm00 ceph-mon[47668]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-08T23:06:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:14 vm00 ceph-mon[47668]: osdmap e28: 6 total, 5 up, 6 in 2026-03-08T23:06:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:14 vm00 ceph-mon[47668]: osdmap e29: 6 total, 5 up, 6 in 2026-03-08T23:06:15.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:16.002 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:15 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-08T23:06:16.002 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:15 vm08 ceph-mon[56824]: 
from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:16.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-08T23:06:16.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:17.043 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:16 vm08 ceph-mon[56824]: Deploying daemon osd.5 on vm08 2026-03-08T23:06:17.043 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:16 vm08 ceph-mon[56824]: pgmap v52: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-08T23:06:17.043 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:16 vm08 ceph-mon[56824]: Health check failed: Degraded data redundancy: 1 pg degraded (PG_DEGRADED) 2026-03-08T23:06:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:16 vm00 ceph-mon[47668]: Deploying daemon osd.5 on vm08 2026-03-08T23:06:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:16 vm00 ceph-mon[47668]: pgmap v52: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-08T23:06:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:16 vm00 ceph-mon[47668]: Health check failed: Degraded data redundancy: 1 pg degraded (PG_DEGRADED) 2026-03-08T23:06:17.885 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 5 on host 'vm08' 2026-03-08T23:06:17.952 DEBUG:teuthology.orchestra.run.vm08:osd.5> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@osd.5.service 2026-03-08T23:06:17.953 INFO:tasks.cephadm:Deploying osd.6 on vm08 with /dev/vdc... 
2026-03-08T23:06:17.953 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- lvm zap /dev/vdc 2026-03-08T23:06:18.282 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:18.283 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:18.283 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:18.283 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:18.283 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:18 vm08 ceph-mon[56824]: pgmap v53: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-08T23:06:18.283 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:18.283 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:18.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:18.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:18.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:18.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:18.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:18 vm00 ceph-mon[47668]: pgmap v53: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-08T23:06:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:18.693 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:06:18.706 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch daemon add osd vm08:/dev/vdc 
2026-03-08T23:06:18.909 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 08 23:06:18 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-5[63530]: 2026-03-08T23:06:18.662+0000 7f941a2343c0 -1 osd.5 0 log_to_monitors true 2026-03-08T23:06:19.524 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:19 vm08 ceph-mon[56824]: from='osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-08T23:06:19.524 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:06:19.524 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:06:19.524 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:19.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:19 vm00 ceph-mon[47668]: from='osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-08T23:06:19.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:06:19.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:06:19.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:20.430 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 08 23:06:20 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-5[63530]: 2026-03-08T23:06:20.134+0000 7f9410c37700 -1 osd.5 0 waiting for initial osdmap 2026-03-08T23:06:20.430 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 08 23:06:20 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-5[63530]: 2026-03-08T23:06:20.142+0000 7f940adcd700 -1 osd.5 31 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:06:20.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='client.24193 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: osdmap e30: 6 total, 5 up, 6 in 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 
ceph-mon[47668]: from='osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: pgmap v55: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: Detected new or changed devices on vm08 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/775865408' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "aa0647b4-5cb3-4eba-8664-5989f44fb9e0"}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "aa0647b4-5cb3-4eba-8664-5989f44fb9e0"}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "aa0647b4-5cb3-4eba-8664-5989f44fb9e0"}]': finished 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: osdmap e31: 7 total, 5 up, 7 in 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:20.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='client.24193 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: osdmap e30: 6 total, 5 up, 6 in 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: pgmap v55: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: Detected new or changed devices on vm08 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/775865408' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "aa0647b4-5cb3-4eba-8664-5989f44fb9e0"}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "aa0647b4-5cb3-4eba-8664-5989f44fb9e0"}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "aa0647b4-5cb3-4eba-8664-5989f44fb9e0"}]': finished 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: osdmap e31: 7 total, 5 up, 7 in 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:21 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:21 vm08 ceph-mon[56824]: from='client.? 
192.168.123.108:0/1983899638' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:06:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:21 vm08 ceph-mon[56824]: osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074] boot 2026-03-08T23:06:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:21 vm08 ceph-mon[56824]: osdmap e32: 7 total, 6 up, 7 in 2026-03-08T23:06:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:21 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:21 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:21 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:21 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/1983899638' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:06:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:21 vm00 ceph-mon[47668]: osd.5 [v2:192.168.123.108:6808/2980381074,v1:192.168.123.108:6809/2980381074] boot 2026-03-08T23:06:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:21 vm00 ceph-mon[47668]: osdmap e32: 7 total, 6 up, 7 in 2026-03-08T23:06:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:21 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:06:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:21 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:22.806 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:22 vm08 ceph-mon[56824]: purged_snaps scrub starts 2026-03-08T23:06:22.806 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:22 vm08 ceph-mon[56824]: purged_snaps scrub ok 2026-03-08T23:06:22.806 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:22 vm08 ceph-mon[56824]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-08T23:06:22.806 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:22 vm08 ceph-mon[56824]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1 pg degraded) 2026-03-08T23:06:22.806 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:22 vm08 ceph-mon[56824]: Cluster is now healthy 2026-03-08T23:06:22.806 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:22 vm08 ceph-mon[56824]: osdmap e33: 7 total, 6 up, 7 in 2026-03-08T23:06:22.806 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:22.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:22 vm00 ceph-mon[47668]: purged_snaps scrub starts 2026-03-08T23:06:22.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:22 vm00 ceph-mon[47668]: purged_snaps scrub ok 2026-03-08T23:06:22.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:22 vm00 
ceph-mon[47668]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-08T23:06:22.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:22 vm00 ceph-mon[47668]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1 pg degraded) 2026-03-08T23:06:22.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:22 vm00 ceph-mon[47668]: Cluster is now healthy 2026-03-08T23:06:22.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:22 vm00 ceph-mon[47668]: osdmap e33: 7 total, 6 up, 7 in 2026-03-08T23:06:22.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:24.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:24 vm00 ceph-mon[47668]: osdmap e34: 7 total, 6 up, 7 in 2026-03-08T23:06:24.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:24 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:24.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:24 vm00 ceph-mon[47668]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 97 KiB/s, 0 objects/s recovering 2026-03-08T23:06:24.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:24 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-08T23:06:24.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:24 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:24.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:24 vm00 ceph-mon[47668]: Deploying daemon osd.6 on vm08 2026-03-08T23:06:24.469 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:24 vm08 ceph-mon[56824]: osdmap e34: 7 total, 6 up, 7 in 2026-03-08T23:06:24.469 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:24 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:24.469 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:24 vm08 ceph-mon[56824]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 97 KiB/s, 0 objects/s recovering 2026-03-08T23:06:24.469 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:24 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-08T23:06:24.469 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:24 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:24.469 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:24 vm08 ceph-mon[56824]: Deploying daemon osd.6 on vm08 2026-03-08T23:06:27.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:26 vm08 ceph-mon[56824]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:06:27.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:26 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
2026-03-08T23:06:27.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:26 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:27.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:26 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:27.129 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:26 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:26 vm00 ceph-mon[47668]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:06:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:26 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:26 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:26 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:26 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:27.240 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 6 on host 'vm08' 2026-03-08T23:06:27.432 DEBUG:teuthology.orchestra.run.vm08:osd.6> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@osd.6.service 2026-03-08T23:06:27.435 INFO:tasks.cephadm:Deploying osd.7 on vm08 with /dev/vdb... 
2026-03-08T23:06:27.435 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- lvm zap /dev/vdb 2026-03-08T23:06:27.689 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 08 23:06:27 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-6[66327]: 2026-03-08T23:06:27.555+0000 7f483cae13c0 -1 osd.6 0 log_to_monitors true 2026-03-08T23:06:28.590 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:28 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:28.590 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:28 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:28.590 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:28 vm08 ceph-mon[56824]: pgmap v63: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:06:28.590 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:28 vm08 ceph-mon[56824]: from='osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-08T23:06:28.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:28 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:28.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:28 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:28.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:28 vm00 ceph-mon[47668]: pgmap v63: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:06:28.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:28 vm00 ceph-mon[47668]: from='osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-08T23:06:29.148 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-08T23:06:29.162 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch daemon add osd vm08:/dev/vdb 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092]' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: osdmap e35: 7 total, 6 up, 7 in 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 
23:06:29 vm08 ceph-mon[56824]: Detected new or changed devices on vm08 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:29.344 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:29 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:29.344 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 08 23:06:29 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-6[66327]: 2026-03-08T23:06:29.289+0000 7f4834ce7700 -1 osd.6 0 waiting for initial osdmap 2026-03-08T23:06:29.344 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 08 23:06:29 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-6[66327]: 2026-03-08T23:06:29.303+0000 7f482de7b700 -1 osd.6 36 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092]' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: osdmap e35: 7 total, 6 up, 7 in 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: Detected new or changed devices on vm08 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:29 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: from='osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092]' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-08T23:06:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: osdmap e36: 7 total, 6 up, 7 in 2026-03-08T23:06:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: pgmap v66: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:06:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: from='client.24209 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:06:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:06:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092] boot 2026-03-08T23:06:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: osdmap e37: 7 
total, 7 up, 7 in 2026-03-08T23:06:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:30 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: from='osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092]' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: osdmap e36: 7 total, 6 up, 7 in 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: pgmap v66: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: from='client.24209 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: osd.6 [v2:192.168.123.108:6816/2927703092,v1:192.168.123.108:6817/2927703092] boot 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: osdmap e37: 7 total, 7 up, 7 in 2026-03-08T23:06:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:30 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:06:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:31 vm08 ceph-mon[56824]: purged_snaps scrub starts 2026-03-08T23:06:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:31 vm08 ceph-mon[56824]: purged_snaps scrub ok 2026-03-08T23:06:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:31 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/895224384' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e08cf409-8102-4b49-ba01-29bb4d30e0ef"}]: dispatch 2026-03-08T23:06:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:31 vm08 ceph-mon[56824]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e08cf409-8102-4b49-ba01-29bb4d30e0ef"}]: dispatch 2026-03-08T23:06:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:31 vm08 ceph-mon[56824]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "e08cf409-8102-4b49-ba01-29bb4d30e0ef"}]': finished 2026-03-08T23:06:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:31 vm08 ceph-mon[56824]: osdmap e38: 8 total, 7 up, 8 in 2026-03-08T23:06:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:31 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:31 vm08 ceph-mon[56824]: from='client.? 192.168.123.108:0/834406512' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:06:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:31 vm00 ceph-mon[47668]: purged_snaps scrub starts 2026-03-08T23:06:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:31 vm00 ceph-mon[47668]: purged_snaps scrub ok 2026-03-08T23:06:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:31 vm00 ceph-mon[47668]: from='client.? 192.168.123.108:0/895224384' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e08cf409-8102-4b49-ba01-29bb4d30e0ef"}]: dispatch 2026-03-08T23:06:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:31 vm00 ceph-mon[47668]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e08cf409-8102-4b49-ba01-29bb4d30e0ef"}]: dispatch 2026-03-08T23:06:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:31 vm00 ceph-mon[47668]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "e08cf409-8102-4b49-ba01-29bb4d30e0ef"}]': finished 2026-03-08T23:06:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:31 vm00 ceph-mon[47668]: osdmap e38: 8 total, 7 up, 8 in 2026-03-08T23:06:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:31 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:31 vm00 ceph-mon[47668]: from='client.? 
192.168.123.108:0/834406512' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:06:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:32 vm08 ceph-mon[56824]: pgmap v69: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:32 vm08 ceph-mon[56824]: osdmap e39: 8 total, 7 up, 8 in 2026-03-08T23:06:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:32 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:32 vm00 ceph-mon[47668]: pgmap v69: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:32 vm00 ceph-mon[47668]: osdmap e39: 8 total, 7 up, 8 in 2026-03-08T23:06:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:32 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:34.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:34 vm08 ceph-mon[56824]: pgmap v71: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:34.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:34 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-08T23:06:34.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:34 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:34.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:34 vm00 ceph-mon[47668]: pgmap v71: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:34.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:34 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-08T23:06:34.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:34 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:35 vm08 ceph-mon[56824]: Deploying daemon osd.7 on vm08 2026-03-08T23:06:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:35 vm00 ceph-mon[47668]: Deploying daemon osd.7 on vm08 2026-03-08T23:06:36.566 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:36 vm08 ceph-mon[56824]: pgmap v72: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:36.566 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:36 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:36.566 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:36 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:36.566 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:36 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", 
"format": "json"}]: dispatch 2026-03-08T23:06:36.566 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:36 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:36.566 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:36 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:36 vm00 ceph-mon[47668]: pgmap v72: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:36 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:36 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:36 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:36.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:36 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:36.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:36 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:37.344 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 7 on host 'vm08' 2026-03-08T23:06:37.402 DEBUG:teuthology.orchestra.run.vm08:osd.7> sudo journalctl -f -n 0 -u ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae@osd.7.service 2026-03-08T23:06:37.404 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 
2026-03-08T23:06:37.404 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd stat -f json 2026-03-08T23:06:37.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:37 vm08 ceph-mon[56824]: from='osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-08T23:06:37.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:37 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:37.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:37 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:37.708 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:37 vm00 ceph-mon[47668]: from='osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-08T23:06:37.709 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:37 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:37.709 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:37 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:37.930 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:38.022 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":40,"num_osds":8,"num_up_osds":7,"osd_up_since":1773011190,"num_in_osds":8,"osd_in_since":1773011190,"num_remapped_pgs":0} 2026-03-08T23:06:38.878 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 08 23:06:38 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-7[69149]: 2026-03-08T23:06:38.525+0000 7f8a2ddca700 -1 osd.7 0 waiting for initial osdmap 2026-03-08T23:06:38.878 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 08 23:06:38 vm08 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-osd-7[69149]: 2026-03-08T23:06:38.565+0000 7f8a2675d700 -1 osd.7 41 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:06:38.879 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:38 vm08 ceph-mon[56824]: pgmap v73: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:38.879 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:38 vm08 ceph-mon[56824]: from='osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-08T23:06:38.879 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:38 vm08 ceph-mon[56824]: osdmap e40: 8 total, 7 up, 8 in 2026-03-08T23:06:38.879 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:38 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:38.879 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:38 vm08 ceph-mon[56824]: from='osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:38.879 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:38 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/335800497' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:06:38.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:38 vm00 ceph-mon[47668]: pgmap v73: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:38.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:38 vm00 ceph-mon[47668]: from='osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-08T23:06:38.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:38 vm00 ceph-mon[47668]: osdmap e40: 8 total, 7 up, 8 in 2026-03-08T23:06:38.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:38 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:38.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:38 vm00 ceph-mon[47668]: from='osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-08T23:06:38.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:38 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/335800497' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:06:39.023 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd stat -f json 2026-03-08T23:06:39.501 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:39.556 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-08T23:06:39.556 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: osdmap e41: 8 total, 7 up, 8 in 2026-03-08T23:06:39.556 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: Detected new or changed devices on vm08 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: 
from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/4079812449' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:06:39.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:39 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:39.565 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":41,"num_osds":8,"num_up_osds":7,"osd_up_since":1773011190,"num_in_osds":8,"osd_in_since":1773011190,"num_remapped_pgs":0} 2026-03-08T23:06:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-08T23:06:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: osdmap e41: 8 total, 7 up, 8 in 2026-03-08T23:06:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: Detected new or changed devices on vm08 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:06:39.878 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/4079812449' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:39 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:40.566 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd stat -f json 2026-03-08T23:06:40.837 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:40 vm00 ceph-mon[47668]: purged_snaps scrub starts 2026-03-08T23:06:40.837 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:40 vm00 ceph-mon[47668]: purged_snaps scrub ok 2026-03-08T23:06:40.837 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:40 vm00 ceph-mon[47668]: pgmap v76: 1 pgs: 1 peering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:40.837 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:40 vm00 ceph-mon[47668]: osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373] boot 2026-03-08T23:06:40.837 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:40 vm00 ceph-mon[47668]: osdmap e42: 8 total, 8 up, 8 in 2026-03-08T23:06:40.837 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:40 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:40 vm08 ceph-mon[56824]: purged_snaps scrub starts 2026-03-08T23:06:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:40 vm08 ceph-mon[56824]: purged_snaps scrub ok 2026-03-08T23:06:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:40 vm08 ceph-mon[56824]: pgmap v76: 1 pgs: 1 peering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:06:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:40 vm08 ceph-mon[56824]: osd.7 [v2:192.168.123.108:6824/4271319373,v1:192.168.123.108:6825/4271319373] boot 2026-03-08T23:06:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:40 vm08 ceph-mon[56824]: osdmap e42: 8 total, 8 up, 8 in 2026-03-08T23:06:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:40 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:06:41.058 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:41.132 
INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":43,"num_osds":8,"num_up_osds":8,"osd_up_since":1773011199,"num_in_osds":8,"osd_in_since":1773011190,"num_remapped_pgs":0} 2026-03-08T23:06:41.132 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd dump --format=json 2026-03-08T23:06:41.277 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:41.597 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:41 vm00 ceph-mon[47668]: osdmap e43: 8 total, 8 up, 8 in 2026-03-08T23:06:41.598 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:41 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3788986492' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:06:41.598 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:41 vm00 ceph-mon[47668]: osdmap e44: 8 total, 8 up, 8 in 2026-03-08T23:06:41.598 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:41.598 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":44,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","created":"2026-03-08T23:03:46.650474+0000","modified":"2026-03-08T23:06:41.553403+0000","last_up_change":"2026-03-08T23:06:39.534209+0000","last_in_change":"2026-03-08T23:06:30.452085+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-08T23:05:53.643147+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"20","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"4f19378e-5738-4989-8fed-c2f3af8313ea","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"l
ast_clean_end":0,"up_from":9,"up_thru":42,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6803","nonce":3982901667}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6805","nonce":3982901667}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6809","nonce":3982901667}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6807","nonce":3982901667}]},"public_addr":"192.168.123.100:6803/3982901667","cluster_addr":"192.168.123.100:6805/3982901667","heartbeat_back_addr":"192.168.123.100:6809/3982901667","heartbeat_front_addr":"192.168.123.100:6807/3982901667","state":["exists","up"]},{"osd":1,"uuid":"623f8038-6c2f-49df-b1ca-6a03a815b1a6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":13,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6811","nonce":1974704816}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6813","nonce":1974704816}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6817","nonce":1974704816}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6815","nonce":1974704816}]},"public_addr":"192.168.123.100:6811/1974704816","cluster_addr":"192.168.123.100:6813/1974704816","heartbeat_back_addr":"192.168.123.100:6817/1974704816","heartbeat_front_addr":"192.168.123.100:6815/1974704816","state":["exists","up"]},{"osd":2,"uuid":"d43872a3-385b-40c9-8132-04eb471428b6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6819","nonce":625312955}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6821","nonce":625312955}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6825","nonce":625312955}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6823","nonce":625312955}]},"public_addr":"192.168.123.100:6819/625312955","cluster_addr":"192.168.123.100:6821/625312955","heartbeat_back_addr":"192.168.123.100:6825/625312955","heartbeat_front_addr":"192.168.123.100:6823/625312955","state":["exists","up"]},{"osd":3,"uuid":"d76b6d00-93d6-4c3b-b7fb-0e4aabef6a84","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6827","nonce":3746026215}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6829","nonce":3746026215}]},
"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6833","nonce":3746026215}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6831","nonce":3746026215}]},"public_addr":"192.168.123.100:6827/3746026215","cluster_addr":"192.168.123.100:6829/3746026215","heartbeat_back_addr":"192.168.123.100:6833/3746026215","heartbeat_front_addr":"192.168.123.100:6831/3746026215","state":["exists","up"]},{"osd":4,"uuid":"bd987b29-9261-4c05-b392-d5fcc1540ab1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6801","nonce":1320855616}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6803","nonce":1320855616}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6807","nonce":1320855616}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6805","nonce":1320855616}]},"public_addr":"192.168.123.108:6801/1320855616","cluster_addr":"192.168.123.108:6803/1320855616","heartbeat_back_addr":"192.168.123.108:6807/1320855616","heartbeat_front_addr":"192.168.123.108:6805/1320855616","state":["exists","up"]},{"osd":5,"uuid":"bfe25307-0f2e-4a7c-bbba-a5f33748435b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":32,"up_thru":33,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6809","nonce":2980381074}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6810","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6811","nonce":2980381074}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6814","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6815","nonce":2980381074}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6812","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6813","nonce":2980381074}]},"public_addr":"192.168.123.108:6809/2980381074","cluster_addr":"192.168.123.108:6811/2980381074","heartbeat_back_addr":"192.168.123.108:6815/2980381074","heartbeat_front_addr":"192.168.123.108:6813/2980381074","state":["exists","up"]},{"osd":6,"uuid":"aa0647b4-5cb3-4eba-8664-5989f44fb9e0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":37,"up_thru":38,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6816","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6817","nonce":2927703092}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6818","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6819","nonce":2927703092}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6822","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6823","nonce":2927703092}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6820","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6821","nonce":2927703092}]},"public_addr":"192.168.123.108:6817/2
927703092","cluster_addr":"192.168.123.108:6819/2927703092","heartbeat_back_addr":"192.168.123.108:6823/2927703092","heartbeat_front_addr":"192.168.123.108:6821/2927703092","state":["exists","up"]},{"osd":7,"uuid":"e08cf409-8102-4b49-ba01-29bb4d30e0ef","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":42,"up_thru":43,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6824","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6825","nonce":4271319373}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6826","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6827","nonce":4271319373}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6830","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6831","nonce":4271319373}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6828","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6829","nonce":4271319373}]},"public_addr":"192.168.123.108:6825/4271319373","cluster_addr":"192.168.123.108:6827/4271319373","heartbeat_back_addr":"192.168.123.108:6831/4271319373","heartbeat_front_addr":"192.168.123.108:6829/4271319373","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:05:32.073700+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:05:41.923817+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:05:51.959246+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:02.221419+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:11.019398+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:19.680092+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:28.578339+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:37.979084+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:6800/2139829498":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/3378709406":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/4050405788":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/1222041110":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/1166590403":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6801/3468375714":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6800/3468375714":"2026-03-09T23:04:13.497891+0000","192.168.123.100:0/1667994189":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6800/1912337238":"2026-03-09T23:04:01.150333+0000","192.168.123.100:0/2106940531":"202
6-03-09T23:04:13.497891+0000","192.168.123.100:6801/1912337238":"2026-03-09T23:04:01.150333+0000","192.168.123.100:6801/2139829498":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/3693847658":"2026-03-09T23:04:01.150333+0000","192.168.123.100:0/154522868":"2026-03-09T23:04:01.150333+0000","192.168.123.100:0/2635829435":"2026-03-09T23:04:01.150333+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-08T23:06:41.663 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-08T23:05:53.643147+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '20', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}}] 2026-03-08T23:06:41.663 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd pool get .mgr pg_num 2026-03-08T23:06:41.823 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:41 vm08 ceph-mon[56824]: osdmap e43: 8 total, 8 up, 8 in 2026-03-08T23:06:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:41 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/3788986492' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:06:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:41 vm08 ceph-mon[56824]: osdmap e44: 8 total, 8 up, 8 in 2026-03-08T23:06:42.184 INFO:teuthology.orchestra.run.vm00.stdout:pg_num: 1 2026-03-08T23:06:42.245 INFO:tasks.cephadm:Setting up client nodes... 
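The large JSON blob above is the `ceph osd dump --format=json` output that tasks.cephadm.ceph_manager parses: it logs the extracted pools list (only the built-in .mgr pool exists at this point) and then queries `ceph osd pool get .mgr pg_num`, which returns 1. A small sketch of that kind of post-processing on a saved copy of the dump, using only fields visible above (the file name is illustrative):

import json

# Assumes the `ceph osd dump --format=json` output shown above was saved
# to osd_dump.json (illustrative file name, not part of the test).
with open("osd_dump.json") as f:
    dump = json.load(f)

# All eight OSDs in this run report state ["exists", "up"] and are both up and in.
down = [o["osd"] for o in dump["osds"] if not (o["up"] and o["in"])]
print("down or out OSDs:", down or "none")

# Per-pool settings, e.g. pg_num of the .mgr pool (1 in this run).
for pool in dump["pools"]:
    print(pool["pool_name"], "pg_num:", pool["pg_num"])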
2026-03-08T23:06:42.246 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-08T23:06:42.805 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:42 vm00 ceph-mon[47668]: pgmap v79: 1 pgs: 1 peering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:06:42.805 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:42 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3469690169' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:06:42.805 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:42 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3428458682' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-08T23:06:42.805 INFO:teuthology.orchestra.run.vm00.stdout:[client.0] 2026-03-08T23:06:42.805 INFO:teuthology.orchestra.run.vm00.stdout: key = AQACAa5ps598LxAAgVuMiMX+qPGddLLIQxjEgg== 2026-03-08T23:06:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:42 vm08 ceph-mon[56824]: pgmap v79: 1 pgs: 1 peering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:06:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:42 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/3469690169' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:06:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:42 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/3428458682' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-08T23:06:42.892 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-08T23:06:42.893 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-08T23:06:42.893 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-08T23:06:42.933 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 
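The "Waiting until ceph daemons up and pgs clean..." step begins by waiting for an active mgr: the lines below run `ceph mgr dump --format=json`, and the dump reports "available":true with active mgr vm00.pkgtpt. A minimal stand-alone sketch of that availability poll, again with the image and fsid copied from this run (`mgr_available` is a hypothetical helper, not the teuthology code):

import json
import subprocess
import time

# Values copied from this run; the helper itself is illustrative only.
IMAGE = "quay.io/ceph/ceph:v17.2.0"
FSID = "cabe2722-1b42-11f1-9450-0d39870fd3ae"
CEPHADM = "/home/ubuntu/cephtest/cephadm"

def mgr_available(timeout=300, interval=5):
    """Poll `ceph mgr dump --format=json` until an active mgr reports available."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.check_output(
            ["sudo", CEPHADM, "--image", IMAGE, "shell", "--fsid", FSID, "--",
             "ceph", "mgr", "dump", "--format=json"],
            text=True,
        )
        dump = json.loads(out)
        if dump.get("available"):
            return dump["active_name"]  # "vm00.pkgtpt" in this run
        time.sleep(interval)
    raise TimeoutError("no active mgr became available")

if __name__ == "__main__":
    print("active mgr:", mgr_available())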
2026-03-08T23:06:42.934 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-08T23:06:42.934 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph mgr dump --format=json 2026-03-08T23:06:43.134 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:43.522 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:43.573 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":19,"active_gid":14236,"active_name":"vm00.pkgtpt","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6800","nonce":1655534837},{"type":"v1","addr":"192.168.123.100:6801","nonce":1655534837}]},"active_addr":"192.168.123.100:6801/1655534837","active_change":"2026-03-08T23:05:05.366673+0000","active_mgr_features":4540138303579357183,"available":true,"standbys":[{"gid":14256,"name":"vm08.fufswh","mgr_features":4540138303579357183,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope 
sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph 
containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container 
image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per 
host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are 
removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.100:8443/","prometheus":"http://192.168.123.100:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"last_failure_osd_epoch":5,"active_clients":[{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":1631657693}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":1679115479}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":1277373324}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":744088072}]}]}} 2026-03-08T23:06:43.575 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-08T23:06:43.575 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-08T23:06:43.575 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd dump --format=json 2026-03-08T23:06:43.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:43 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/655338163' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:06:43.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:43 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/655338163' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-08T23:06:43.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:43 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/3043967120' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-08T23:06:43.745 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:43 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/655338163' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:06:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:43 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/655338163' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-08T23:06:43.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:43 vm08 ceph-mon[56824]: from='client.? 
192.168.123.100:0/3043967120' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-08T23:06:44.104 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:44.104 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":44,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","created":"2026-03-08T23:03:46.650474+0000","modified":"2026-03-08T23:06:41.553403+0000","last_up_change":"2026-03-08T23:06:39.534209+0000","last_in_change":"2026-03-08T23:06:30.452085+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-08T23:05:53.643147+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"20","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"4f19378e-5738-4989-8fed-c2f3af8313ea","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":9,"up_thru":42,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6803","nonce":3982901667}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6805","nonce":3982901667}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6809","nonce":3982901667}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6807","nonce":3982901667}]},"public_addr":"192.168.123.100:6803/3982901667","cluster_addr":"192.168.123.100:6805/3982901667","heartbeat_back_addr":"192.168.123.100:6809/3982901667","heartbeat_front_addr":"192.168.123.100:6807/3982901667","state":["ex
ists","up"]},{"osd":1,"uuid":"623f8038-6c2f-49df-b1ca-6a03a815b1a6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":13,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6811","nonce":1974704816}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6813","nonce":1974704816}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6817","nonce":1974704816}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6815","nonce":1974704816}]},"public_addr":"192.168.123.100:6811/1974704816","cluster_addr":"192.168.123.100:6813/1974704816","heartbeat_back_addr":"192.168.123.100:6817/1974704816","heartbeat_front_addr":"192.168.123.100:6815/1974704816","state":["exists","up"]},{"osd":2,"uuid":"d43872a3-385b-40c9-8132-04eb471428b6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6819","nonce":625312955}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6821","nonce":625312955}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6825","nonce":625312955}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6823","nonce":625312955}]},"public_addr":"192.168.123.100:6819/625312955","cluster_addr":"192.168.123.100:6821/625312955","heartbeat_back_addr":"192.168.123.100:6825/625312955","heartbeat_front_addr":"192.168.123.100:6823/625312955","state":["exists","up"]},{"osd":3,"uuid":"d76b6d00-93d6-4c3b-b7fb-0e4aabef6a84","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6827","nonce":3746026215}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6829","nonce":3746026215}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6833","nonce":3746026215}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6831","nonce":3746026215}]},"public_addr":"192.168.123.100:6827/3746026215","cluster_addr":"192.168.123.100:6829/3746026215","heartbeat_back_addr":"192.168.123.100:6833/3746026215","heartbeat_front_addr":"192.168.123.100:6831/3746026215","state":["exists","up"]},{"osd":4,"uuid":"bd987b29-9261-4c05-b392-d5fcc1540ab1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6801","nonce":1320855616}]},"cluster_addrs":{"a
ddrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6803","nonce":1320855616}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6807","nonce":1320855616}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6805","nonce":1320855616}]},"public_addr":"192.168.123.108:6801/1320855616","cluster_addr":"192.168.123.108:6803/1320855616","heartbeat_back_addr":"192.168.123.108:6807/1320855616","heartbeat_front_addr":"192.168.123.108:6805/1320855616","state":["exists","up"]},{"osd":5,"uuid":"bfe25307-0f2e-4a7c-bbba-a5f33748435b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":32,"up_thru":33,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6809","nonce":2980381074}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6810","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6811","nonce":2980381074}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6814","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6815","nonce":2980381074}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6812","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6813","nonce":2980381074}]},"public_addr":"192.168.123.108:6809/2980381074","cluster_addr":"192.168.123.108:6811/2980381074","heartbeat_back_addr":"192.168.123.108:6815/2980381074","heartbeat_front_addr":"192.168.123.108:6813/2980381074","state":["exists","up"]},{"osd":6,"uuid":"aa0647b4-5cb3-4eba-8664-5989f44fb9e0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":37,"up_thru":38,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6816","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6817","nonce":2927703092}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6818","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6819","nonce":2927703092}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6822","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6823","nonce":2927703092}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6820","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6821","nonce":2927703092}]},"public_addr":"192.168.123.108:6817/2927703092","cluster_addr":"192.168.123.108:6819/2927703092","heartbeat_back_addr":"192.168.123.108:6823/2927703092","heartbeat_front_addr":"192.168.123.108:6821/2927703092","state":["exists","up"]},{"osd":7,"uuid":"e08cf409-8102-4b49-ba01-29bb4d30e0ef","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":42,"up_thru":43,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6824","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6825","nonce":4271319373}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6826","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6827","nonce":4271319373}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6830","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6831","nonce":4271319373}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.16
8.123.108:6828","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6829","nonce":4271319373}]},"public_addr":"192.168.123.108:6825/4271319373","cluster_addr":"192.168.123.108:6827/4271319373","heartbeat_back_addr":"192.168.123.108:6831/4271319373","heartbeat_front_addr":"192.168.123.108:6829/4271319373","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:05:32.073700+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:05:41.923817+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:05:51.959246+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:02.221419+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:11.019398+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:19.680092+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:28.578339+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:37.979084+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:6800/2139829498":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/3378709406":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/4050405788":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/1222041110":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/1166590403":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6801/3468375714":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6800/3468375714":"2026-03-09T23:04:13.497891+0000","192.168.123.100:0/1667994189":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6800/1912337238":"2026-03-09T23:04:01.150333+0000","192.168.123.100:0/2106940531":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6801/1912337238":"2026-03-09T23:04:01.150333+0000","192.168.123.100:6801/2139829498":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/3693847658":"2026-03-09T23:04:01.150333+0000","192.168.123.100:0/154522868":"2026-03-09T23:04:01.150333+0000","192.168.123.100:0/2635829435":"2026-03-09T23:04:01.150333+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-08T23:06:44.176 INFO:tasks.cephadm.ceph_manager.ceph:all up! 
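
[editor's note: a minimal sketch, not the actual teuthology ceph_manager code, of how the "waiting for all up" / "all up!" check logged above can be reproduced by hand. It runs the same `cephadm ... shell -- ceph osd dump --format=json` command shown in the log and confirms every OSD in the map reports up=1 and in=1. The FSID and IMAGE values are taken from this run; the all_up() helper name is hypothetical.]

#!/usr/bin/env python3
# Sketch only: re-run the OSD-map check that the log above performs.
import json
import subprocess

FSID = "cabe2722-1b42-11f1-9450-0d39870fd3ae"   # fsid seen in this log
IMAGE = "quay.io/ceph/ceph:v17.2.0"             # bootstrap image used by this job

def osd_dump():
    """Return the parsed OSD map from `ceph osd dump --format=json`."""
    out = subprocess.check_output([
        "sudo", "cephadm", "--image", IMAGE, "shell", "--fsid", FSID,
        "--", "ceph", "osd", "dump", "--format=json",
    ])
    return json.loads(out)

def all_up(dump):
    """True when every OSD in the map is both up and in (hypothetical helper)."""
    osds = dump.get("osds", [])
    return bool(osds) and all(o["up"] == 1 and o["in"] == 1 for o in osds)

if __name__ == "__main__":
    dump = osd_dump()
    state = "all up!" if all_up(dump) else "still waiting"
    print("epoch", dump["epoch"], state)

[editor's note: with the map shown above (8 OSDs, all ["exists","up"]), this prints "epoch 44 all up!", matching the ceph_manager log line that follows.]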
2026-03-08T23:06:44.176 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd dump --format=json 2026-03-08T23:06:44.330 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:44.659 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:44 vm00 ceph-mon[47668]: pgmap v81: 1 pgs: 1 peering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:06:44.659 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:44 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/868657891' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:06:44.659 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:44.659 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":44,"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","created":"2026-03-08T23:03:46.650474+0000","modified":"2026-03-08T23:06:41.553403+0000","last_up_change":"2026-03-08T23:06:39.534209+0000","last_in_change":"2026-03-08T23:06:30.452085+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-08T23:05:53.643147+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"20","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"4f19378e-5738-4989-8fed-c2f3af8313ea","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":9,"up_thru":42,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6803","nonce":3982901667}]},"cluster_addrs":{"addrvec":[{"type":"v2",
"addr":"192.168.123.100:6804","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6805","nonce":3982901667}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6809","nonce":3982901667}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":3982901667},{"type":"v1","addr":"192.168.123.100:6807","nonce":3982901667}]},"public_addr":"192.168.123.100:6803/3982901667","cluster_addr":"192.168.123.100:6805/3982901667","heartbeat_back_addr":"192.168.123.100:6809/3982901667","heartbeat_front_addr":"192.168.123.100:6807/3982901667","state":["exists","up"]},{"osd":1,"uuid":"623f8038-6c2f-49df-b1ca-6a03a815b1a6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":13,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6811","nonce":1974704816}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6813","nonce":1974704816}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6817","nonce":1974704816}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":1974704816},{"type":"v1","addr":"192.168.123.100:6815","nonce":1974704816}]},"public_addr":"192.168.123.100:6811/1974704816","cluster_addr":"192.168.123.100:6813/1974704816","heartbeat_back_addr":"192.168.123.100:6817/1974704816","heartbeat_front_addr":"192.168.123.100:6815/1974704816","state":["exists","up"]},{"osd":2,"uuid":"d43872a3-385b-40c9-8132-04eb471428b6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6819","nonce":625312955}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6821","nonce":625312955}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6825","nonce":625312955}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":625312955},{"type":"v1","addr":"192.168.123.100:6823","nonce":625312955}]},"public_addr":"192.168.123.100:6819/625312955","cluster_addr":"192.168.123.100:6821/625312955","heartbeat_back_addr":"192.168.123.100:6825/625312955","heartbeat_front_addr":"192.168.123.100:6823/625312955","state":["exists","up"]},{"osd":3,"uuid":"d76b6d00-93d6-4c3b-b7fb-0e4aabef6a84","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6827","nonce":3746026215}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6829","nonce":3746026215}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":3746026215},{"type":"v1","addr":"192.168.123.100:6833","nonce":3746026215}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":3746026215},
{"type":"v1","addr":"192.168.123.100:6831","nonce":3746026215}]},"public_addr":"192.168.123.100:6827/3746026215","cluster_addr":"192.168.123.100:6829/3746026215","heartbeat_back_addr":"192.168.123.100:6833/3746026215","heartbeat_front_addr":"192.168.123.100:6831/3746026215","state":["exists","up"]},{"osd":4,"uuid":"bd987b29-9261-4c05-b392-d5fcc1540ab1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6801","nonce":1320855616}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6803","nonce":1320855616}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6807","nonce":1320855616}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":1320855616},{"type":"v1","addr":"192.168.123.108:6805","nonce":1320855616}]},"public_addr":"192.168.123.108:6801/1320855616","cluster_addr":"192.168.123.108:6803/1320855616","heartbeat_back_addr":"192.168.123.108:6807/1320855616","heartbeat_front_addr":"192.168.123.108:6805/1320855616","state":["exists","up"]},{"osd":5,"uuid":"bfe25307-0f2e-4a7c-bbba-a5f33748435b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":32,"up_thru":33,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6809","nonce":2980381074}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6810","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6811","nonce":2980381074}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6814","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6815","nonce":2980381074}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6812","nonce":2980381074},{"type":"v1","addr":"192.168.123.108:6813","nonce":2980381074}]},"public_addr":"192.168.123.108:6809/2980381074","cluster_addr":"192.168.123.108:6811/2980381074","heartbeat_back_addr":"192.168.123.108:6815/2980381074","heartbeat_front_addr":"192.168.123.108:6813/2980381074","state":["exists","up"]},{"osd":6,"uuid":"aa0647b4-5cb3-4eba-8664-5989f44fb9e0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":37,"up_thru":38,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6816","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6817","nonce":2927703092}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6818","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6819","nonce":2927703092}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6822","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6823","nonce":2927703092}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6820","nonce":2927703092},{"type":"v1","addr":"192.168.123.108:6821","nonce":2927703092}]},"public_addr":"192.168.123.108:6817/2927703092","cluster_addr":"192.168.123.108:6819/2927703092","heartbeat_back_addr":"192.168.123.108:6823/2927703092","heartbeat_front_addr":"192.168.123.108:6821/2927703092","state":["exists","up"]},{"osd":7,"uuid":"e08cf409-8102-4b49-ba01-29bb4d30e0ef","up":1,"i
n":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":42,"up_thru":43,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6824","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6825","nonce":4271319373}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6826","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6827","nonce":4271319373}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6830","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6831","nonce":4271319373}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6828","nonce":4271319373},{"type":"v1","addr":"192.168.123.108:6829","nonce":4271319373}]},"public_addr":"192.168.123.108:6825/4271319373","cluster_addr":"192.168.123.108:6827/4271319373","heartbeat_back_addr":"192.168.123.108:6831/4271319373","heartbeat_front_addr":"192.168.123.108:6829/4271319373","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:05:32.073700+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:05:41.923817+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:05:51.959246+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:02.221419+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:11.019398+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:19.680092+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:28.578339+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:06:37.979084+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:6800/2139829498":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/3378709406":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/4050405788":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/1222041110":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/1166590403":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6801/3468375714":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6800/3468375714":"2026-03-09T23:04:13.497891+0000","192.168.123.100:0/1667994189":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6800/1912337238":"2026-03-09T23:04:01.150333+0000","192.168.123.100:0/2106940531":"2026-03-09T23:04:13.497891+0000","192.168.123.100:6801/1912337238":"2026-03-09T23:04:01.150333+0000","192.168.123.100:6801/2139829498":"2026-03-09T23:05:05.322539+0000","192.168.123.100:0/3693847658":"2026-03-09T23:04:01.150333+0000","192.168.123.100:0/154522868":"
2026-03-09T23:04:01.150333+0000","192.168.123.100:0/2635829435":"2026-03-09T23:04:01.150333+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-08T23:06:44.726 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph tell osd.0 flush_pg_stats 2026-03-08T23:06:44.727 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph tell osd.1 flush_pg_stats 2026-03-08T23:06:44.727 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph tell osd.2 flush_pg_stats 2026-03-08T23:06:44.727 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph tell osd.3 flush_pg_stats 2026-03-08T23:06:44.727 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph tell osd.4 flush_pg_stats 2026-03-08T23:06:44.727 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph tell osd.5 flush_pg_stats 2026-03-08T23:06:44.727 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph tell osd.6 flush_pg_stats 2026-03-08T23:06:44.727 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph tell osd.7 flush_pg_stats 2026-03-08T23:06:44.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:44 vm08 ceph-mon[56824]: pgmap v81: 1 pgs: 1 peering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:06:44.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:44 vm08 ceph-mon[56824]: from='client.? 
192.168.123.100:0/868657891' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:06:45.336 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:45.417 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:45.453 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:45.461 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:45.463 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:45.469 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:45.487 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:45.531 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:45.743 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:45 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/1590143820' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:06:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:45 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/1590143820' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:06:46.568 INFO:teuthology.orchestra.run.vm00.stdout:115964117000 2026-03-08T23:06:46.568 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd last-stat-seq osd.4 2026-03-08T23:06:46.691 INFO:teuthology.orchestra.run.vm00.stdout:38654705680 2026-03-08T23:06:46.691 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd last-stat-seq osd.0 2026-03-08T23:06:46.775 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:46 vm00 ceph-mon[47668]: pgmap v82: 1 pgs: 1 peering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:06:46.819 INFO:teuthology.orchestra.run.vm00.stdout:55834574862 2026-03-08T23:06:46.819 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd last-stat-seq osd.1 2026-03-08T23:06:46.857 INFO:teuthology.orchestra.run.vm00.stdout:73014444044 2026-03-08T23:06:46.857 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd last-stat-seq osd.2 2026-03-08T23:06:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:46 vm08 ceph-mon[56824]: pgmap v82: 1 pgs: 1 peering; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:06:46.986 INFO:teuthology.orchestra.run.vm00.stdout:158913789957 2026-03-08T23:06:46.987 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 
cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd last-stat-seq osd.6 2026-03-08T23:06:46.997 INFO:teuthology.orchestra.run.vm00.stdout:180388626435 2026-03-08T23:06:46.997 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd last-stat-seq osd.7 2026-03-08T23:06:47.146 INFO:teuthology.orchestra.run.vm00.stdout:137438953479 2026-03-08T23:06:47.147 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd last-stat-seq osd.5 2026-03-08T23:06:47.197 INFO:teuthology.orchestra.run.vm00.stdout:98784247818 2026-03-08T23:06:47.197 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph osd last-stat-seq osd.3 2026-03-08T23:06:47.272 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:47.502 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:47.811 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:47.867 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:47.911 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:48.078 INFO:teuthology.orchestra.run.vm00.stdout:38654705680 2026-03-08T23:06:48.322 INFO:tasks.cephadm.ceph_manager.ceph:need seq 38654705680 got 38654705680 for osd.0 2026-03-08T23:06:48.323 DEBUG:teuthology.parallel:result is None 2026-03-08T23:06:48.340 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:48.341 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:48.348 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:48.495 INFO:teuthology.orchestra.run.vm00.stdout:115964117001 2026-03-08T23:06:48.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:48 vm00 ceph-mon[47668]: pgmap v83: 1 pgs: 1 active+recovering; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s, 0 objects/s recovering 2026-03-08T23:06:48.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:48 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/1173101781' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-08T23:06:48.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:48 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/960314906' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-08T23:06:48.681 INFO:tasks.cephadm.ceph_manager.ceph:need seq 115964117000 got 115964117001 for osd.4 2026-03-08T23:06:48.681 DEBUG:teuthology.parallel:result is None 2026-03-08T23:06:48.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:48 vm08 ceph-mon[56824]: pgmap v83: 1 pgs: 1 active+recovering; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s, 0 objects/s recovering 2026-03-08T23:06:48.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:48 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/1173101781' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-08T23:06:48.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:48 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/960314906' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-08T23:06:49.215 INFO:teuthology.orchestra.run.vm00.stdout:55834574862 2026-03-08T23:06:49.221 INFO:teuthology.orchestra.run.vm00.stdout:180388626435 2026-03-08T23:06:49.313 INFO:tasks.cephadm.ceph_manager.ceph:need seq 55834574862 got 55834574862 for osd.1 2026-03-08T23:06:49.313 DEBUG:teuthology.parallel:result is None 2026-03-08T23:06:49.359 INFO:teuthology.orchestra.run.vm00.stdout:137438953479 2026-03-08T23:06:49.419 INFO:teuthology.orchestra.run.vm00.stdout:98784247818 2026-03-08T23:06:49.429 INFO:tasks.cephadm.ceph_manager.ceph:need seq 137438953479 got 137438953479 for osd.5 2026-03-08T23:06:49.429 DEBUG:teuthology.parallel:result is None 2026-03-08T23:06:49.496 INFO:tasks.cephadm.ceph_manager.ceph:need seq 180388626435 got 180388626435 for osd.7 2026-03-08T23:06:49.496 DEBUG:teuthology.parallel:result is None 2026-03-08T23:06:49.506 INFO:tasks.cephadm.ceph_manager.ceph:need seq 98784247818 got 98784247818 for osd.3 2026-03-08T23:06:49.506 DEBUG:teuthology.parallel:result is None 2026-03-08T23:06:49.524 INFO:teuthology.orchestra.run.vm00.stdout:73014444044 2026-03-08T23:06:49.572 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444044 got 73014444044 for osd.2 2026-03-08T23:06:49.572 DEBUG:teuthology.parallel:result is None 2026-03-08T23:06:49.581 INFO:teuthology.orchestra.run.vm00.stdout:158913789957 2026-03-08T23:06:49.646 INFO:tasks.cephadm.ceph_manager.ceph:need seq 158913789957 got 158913789957 for osd.6 2026-03-08T23:06:49.646 DEBUG:teuthology.parallel:result is None 2026-03-08T23:06:49.646 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-08T23:06:49.646 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph pg dump --format=json 2026-03-08T23:06:49.722 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:49 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/2792655241' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-08T23:06:49.722 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:49 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/1197542131' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-08T23:06:49.722 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:49 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/1723788849' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-08T23:06:49.722 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:49 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/453252000' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-08T23:06:49.722 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:49 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/4116088591' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-08T23:06:49.723 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:49 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/4219323241' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-08T23:06:49.846 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:49 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/2792655241' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-08T23:06:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:49 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/1197542131' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-08T23:06:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:49 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/1723788849' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-08T23:06:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:49 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/453252000' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-08T23:06:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:49 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/4116088591' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-08T23:06:49.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:49 vm08 ceph-mon[56824]: from='client.? 
192.168.123.100:0/4219323241' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-08T23:06:50.200 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:50.204 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-08T23:06:50.275 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":84,"stamp":"2026-03-08T23:06:49.439262+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":5,"num_osds":8,"num_per_pool_osds":5,"num_per_pool_omap_osds":5,"kb":167739392,"kb_used":49096,"kb_used_data":5000,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690296,"statfs":{"total":171765137408,"available":171714863104,"internally_reserved":0,"allocated":5120000,"data_stored":3020819,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_s
ize":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.884614"},"pg_stats":[{"pgid":"1.0","version":"20'76","reported_seq":13,"reported_epoch":44,"state":"active+recovering","last_fresh":"2026-03-08T23:06:45.033048+0000","last_change":"2026-03-08T23:06:42.429783+0000","last_active":"2026-03-08T23:06:45.033048+0000","last_peered":"2026-03-08T23:06:45.033048+0000","last_clean":"2026-03-08T23:05:59.324044+0000","last_became_active":"2026-03-08T23:06:41.562488+0000","last_became_peered":"2026-03-08T23:06:41.562488+0000","last_unstale":"2026-03-08T23:06:45.033048+0000","last_undegraded":"2026-03-08T23:06:45.033048+0000","last_fullsized":"2026-03-08T23:06:45.033048+0000","mapping_epoch":43,"log_start":"0'0","ondisk_log_start":"0'0","created":18,"last_epoch_clean":39,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_clean_scrub_stamp":"2026-03-08T23:05:53.873544+0000","objects_scrubbed":0,"log_size":76,"ondisk_log_size":76,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-10T03:17:22.236209+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":["7","0","1","5","6"],"object_location_counts":[{"shards":"0,6,7","objects":2}],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internal
ly_reserved":0,"allocated":1605632,"data_stored":1591360,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":7}],"osd_stats":[{"osd":7,"up_from":42,"seq":180388626435,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6076,"kb_used_data":820,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961348,"statfs":{"total":21470642176,"available":21464420352,"internally_reserved":0,"allocated":839680,"data_stored":576433,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.41999999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.92700000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56299999999999994}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.372}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82099999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86799999999999999}]}]},{"osd":6,"up_from":37,"seq":158913789957,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6080,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961344,"statfs":{"total":21470642176,"available":21464416256,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.98199999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.2509999999999999}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1639999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58899999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54200000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.33300000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69299999999999995}]}]},{"osd":1,"up_from":13,"seq":55834574862,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6648,"kb_used_data":816,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20960776,"statfs":{"total":21470642176,"available":21463834624,"internally_reserved":0,"allocated":835584,"data_stored":576189,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Sun Mar 8 23:06:45 2026","interfaces":[{"interface":"back","average":{"1min":0.52800000000000002,"5min":0.52800000000000002,"15min":0.52800000000000002},"min":{"1min":0.251,"5min":0.251,"15min":0.251},"max":{"1min":1.022,"5min":1.022,"15min":1.022},"last":0.65500000000000003},{"interface":"front","average":{"1min":0.53300000000000003,"5min":0.53300000000000003,"15min":0.53300000000000003},"min":{"1min":0.23799999999999999,"5min":0.23799999999999999,"15min":0.23799999999999999},"max":{"1min":0.93300000000000005,"5min":0.93300000000000005,"15min":0.93300000000000005},"last":0.35699999999999998}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72399999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69899999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71599999999999997}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61899999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78800000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]}]},{"osd":0,"up_from":9,"seq":38654705681,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6720,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960704,"statfs":{"total":21470642176,"available":21463760896,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64300000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.50600000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52100000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67900000000000005}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69899999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63700000000000001}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78700000000000003}]}]},{"osd":2,"up_from":17,"seq":73014444045,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6264,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961160,"statfs":{"total":21470642176,"available":21464227840,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74399999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57699999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68600000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70599999999999996}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73299999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.47199999999999998}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69699999999999995}]}]},{"osd":3,"up_from":23,"seq":98784247819,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5752,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961672,"statfs":{"total":21470642176,"available":21464752128,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66900000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.496}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72099999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73299999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81699999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53200000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.91200000000000003}]}]},{"osd":4,"up_from":27,"seq":115964117001,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5812,"kb_used_data":428,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961612,"statfs":{"total":21470642176,"available":21464690688,"internally_reserved":0,"allocated":438272,"data_stored":178593,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51700000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58599999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69599999999999995}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79000000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48999999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77200000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89600000000000002}]}]},{"osd":5,"up_from":32,"seq":137438953479,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":5744,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961680,"statfs":{"total":21470642176,"available":21464760320,"internally_reserved":0,"allocated":434176,"data_stored":178292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81000000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.433}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71199999999999997}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.94699999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72199999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45500000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89900000000000002}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":4,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-08T23:06:50.275 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph pg dump --format=json 2026-03-08T23:06:50.469 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:50.803 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:50.803 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:50 vm00 ceph-mon[47668]: pgmap v84: 1 pgs: 1 active+recovering; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s, 0 objects/s recovering 2026-03-08T23:06:50.806 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-08T23:06:50.868 
INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":84,"stamp":"2026-03-08T23:06:49.439262+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":5,"num_osds":8,"num_per_pool_osds":5,"num_per_pool_omap_osds":5,"kb":167739392,"kb_used":49096,"kb_used_data":5000,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690296,"statfs":{"total":171765137408,"available":171714863104,"internally_reserved":0,"allocated":5120000,"data_stored":3020819,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.884614"},"pg_stats":[{"pgid":"1.0","version":"20'76","reported_seq":13,"reported_epoch":44,"state":"active+recovering","last_fresh":"2026-03-08T23:06:45.033048+0000","last_change":"2026-03-08T23:06:42.429783+0000","
last_active":"2026-03-08T23:06:45.033048+0000","last_peered":"2026-03-08T23:06:45.033048+0000","last_clean":"2026-03-08T23:05:59.324044+0000","last_became_active":"2026-03-08T23:06:41.562488+0000","last_became_peered":"2026-03-08T23:06:41.562488+0000","last_unstale":"2026-03-08T23:06:45.033048+0000","last_undegraded":"2026-03-08T23:06:45.033048+0000","last_fullsized":"2026-03-08T23:06:45.033048+0000","mapping_epoch":43,"log_start":"0'0","ondisk_log_start":"0'0","created":18,"last_epoch_clean":39,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_clean_scrub_stamp":"2026-03-08T23:05:53.873544+0000","objects_scrubbed":0,"log_size":76,"ondisk_log_size":76,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-10T03:17:22.236209+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":["7","0","1","5","6"],"object_location_counts":[{"shards":"0,6,7","objects":2}],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1605632,"data_stored":1591360,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":7}],"osd_stats":[{"osd":7,"u
p_from":42,"seq":180388626435,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6076,"kb_used_data":820,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961348,"statfs":{"total":21470642176,"available":21464420352,"internally_reserved":0,"allocated":839680,"data_stored":576433,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.41999999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.92700000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56299999999999994}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.372}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82099999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86799999999999999}]}]},{"osd":6,"up_from":37,"seq":158913789957,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6080,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961344,"statfs":{"total":21470642176,"available":21464416256,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.98199999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.2509999999999999}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1639999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58899999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54200000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.33300000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69299999999999995}]}]},{"osd":1,"up_from":13,"seq":55834574862,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6648,"kb_used_data":816,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20960776,"statfs":{"total":21470642176,"available":21463834624,"internally_reserved":0,"allocated":835584,"data_stored":576189,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Sun Mar 8 23:06:45 2026","interfaces":[{"interface":"back","average":{"1min":0.52800000000000002,"5min":0.52800000000000002,"15min":0.52800000000000002},"min":{"1min":0.251,"5min":0.251,"15min":0.251},"max":{"1min":1.022,"5min":1.022,"15min":1.022},"last":0.65500000000000003},{"interface":"front","average":{"1min":0.53300000000000003,"5min":0.53300000000000003,"15min":0.53300000000000003},"min":{"1min":0.23799999999999999,"5min":0.23799999999999999,"15min":0.23799999999999999},"max":{"1min":0.93300000000000005,"5min":0.93300000000000005,"15min":0.93300000000000005},"last":0.35699999999999998}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72399999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69899999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71599999999999997}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61899999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78800000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]}]},{"osd":0,"up_from":9,"seq":38654705681,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6720,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960704,"statfs":{"total":21470642176,"available":21463760896,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64300000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.50600000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52100000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67900000000000005}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69899999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63700000000000001}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78700000000000003}]}]},{"osd":2,"up_from":17,"seq":73014444045,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6264,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961160,"statfs":{"total":21470642176,"available":21464227840,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74399999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57699999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68600000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70599999999999996}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73299999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.47199999999999998}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69699999999999995}]}]},{"osd":3,"up_from":23,"seq":98784247819,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5752,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961672,"statfs":{"total":21470642176,"available":21464752128,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66900000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.496}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72099999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73299999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81699999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53200000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.91200000000000003}]}]},{"osd":4,"up_from":27,"seq":115964117001,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5812,"kb_used_data":428,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961612,"statfs":{"total":21470642176,"available":21464690688,"internally_reserved":0,"allocated":438272,"data_stored":178593,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51700000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58599999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69599999999999995}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79000000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48999999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77200000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89600000000000002}]}]},{"osd":5,"up_from":32,"seq":137438953479,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":5744,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961680,"statfs":{"total":21470642176,"available":21464760320,"internally_reserved":0,"allocated":434176,"data_stored":178292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81000000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.433}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71199999999999997}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.94699999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72199999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45500000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89900000000000002}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":4,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-08T23:06:50.868 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph pg dump --format=json 2026-03-08T23:06:51.013 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:50 
vm08 ceph-mon[56824]: pgmap v84: 1 pgs: 1 active+recovering; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s, 0 objects/s recovering 2026-03-08T23:06:51.339 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:51.343 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-08T23:06:51.388 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":84,"stamp":"2026-03-08T23:06:49.439262+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":5,"num_osds":8,"num_per_pool_osds":5,"num_per_pool_omap_osds":5,"kb":167739392,"kb_used":49096,"kb_used_data":5000,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690296,"statfs":{"total":171765137408,"available":171714863104,"internally_reserved":0,"allocated":5120000,"data_stored":3020819,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_me
tadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.884614"},"pg_stats":[{"pgid":"1.0","version":"20'76","reported_seq":13,"reported_epoch":44,"state":"active+recovering","last_fresh":"2026-03-08T23:06:45.033048+0000","last_change":"2026-03-08T23:06:42.429783+0000","last_active":"2026-03-08T23:06:45.033048+0000","last_peered":"2026-03-08T23:06:45.033048+0000","last_clean":"2026-03-08T23:05:59.324044+0000","last_became_active":"2026-03-08T23:06:41.562488+0000","last_became_peered":"2026-03-08T23:06:41.562488+0000","last_unstale":"2026-03-08T23:06:45.033048+0000","last_undegraded":"2026-03-08T23:06:45.033048+0000","last_fullsized":"2026-03-08T23:06:45.033048+0000","mapping_epoch":43,"log_start":"0'0","ondisk_log_start":"0'0","created":18,"last_epoch_clean":39,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_clean_scrub_stamp":"2026-03-08T23:05:53.873544+0000","objects_scrubbed":0,"log_size":76,"ondisk_log_size":76,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-10T03:17:22.236209+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":["7","0","1","5","6"],"object_location_counts":[{"shards":"0,6,7","objects":2}],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stat
s":{"total":0,"available":0,"internally_reserved":0,"allocated":1605632,"data_stored":1591360,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":7}],"osd_stats":[{"osd":7,"up_from":42,"seq":180388626435,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6076,"kb_used_data":820,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961348,"statfs":{"total":21470642176,"available":21464420352,"internally_reserved":0,"allocated":839680,"data_stored":576433,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.41999999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.92700000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56299999999999994}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.372}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82099999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86799999999999999}]}]},{"osd":6,"up_from":37,"seq":158913789957,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6080,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961344,"statfs":{"total":21470642176,"available":21464416256,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.98199999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.2509999999999999}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1639999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58899999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54200000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.33300000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69299999999999995}]}]},{"osd":1,"up_from":13,"seq":55834574862,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6648,"kb_used_data":816,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20960776,"statfs":{"total":21470642176,"available":21463834624,"internally_reserved":0,"allocated":835584,"data_stored":576189,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Sun Mar 8 23:06:45 2026","interfaces":[{"interface":"back","average":{"1min":0.52800000000000002,"5min":0.52800000000000002,"15min":0.52800000000000002},"min":{"1min":0.251,"5min":0.251,"15min":0.251},"max":{"1min":1.022,"5min":1.022,"15min":1.022},"last":0.65500000000000003},{"interface":"front","average":{"1min":0.53300000000000003,"5min":0.53300000000000003,"15min":0.53300000000000003},"min":{"1min":0.23799999999999999,"5min":0.23799999999999999,"15min":0.23799999999999999},"max":{"1min":0.93300000000000005,"5min":0.93300000000000005,"15min":0.93300000000000005},"last":0.35699999999999998}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72399999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69899999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71599999999999997}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61899999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78800000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]}]},{"osd":0,"up_from":9,"seq":38654705681,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6720,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960704,"statfs":{"total":21470642176,"available":21463760896,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64300000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.50600000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52100000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67900000000000005}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69899999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63700000000000001}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78700000000000003}]}]},{"osd":2,"up_from":17,"seq":73014444045,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6264,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961160,"statfs":{"total":21470642176,"available":21464227840,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74399999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57699999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68600000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70599999999999996}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73299999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.47199999999999998}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69699999999999995}]}]},{"osd":3,"up_from":23,"seq":98784247819,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5752,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961672,"statfs":{"total":21470642176,"available":21464752128,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66900000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.496}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72099999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73299999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81699999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53200000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.91200000000000003}]}]},{"osd":4,"up_from":27,"seq":115964117001,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5812,"kb_used_data":428,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961612,"statfs":{"total":21470642176,"available":21464690688,"internally_reserved":0,"allocated":438272,"data_stored":178593,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51700000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58599999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69599999999999995}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79000000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48999999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77200000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89600000000000002}]}]},{"osd":5,"up_from":32,"seq":137438953479,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":5744,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961680,"statfs":{"total":21470642176,"available":21464760320,"internally_reserved":0,"allocated":434176,"data_stored":178292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81000000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.433}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71199999999999997}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.94699999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72199999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45500000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89900000000000002}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":4,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-08T23:06:51.388 INFO:tasks.cephadm.ceph_manager.ceph:PG 1.0 is not active+clean 2026-03-08T23:06:51.388 INFO:tasks.cephadm.ceph_manager.ceph:{'pgid': '1.0', 'version': "20'76", 'reported_seq': 13, 'reported_epoch': 44, 'state': 'active+recovering', 'last_fresh': '2026-03-08T23:06:45.033048+0000', 'last_change': '2026-03-08T23:06:42.429783+0000', 'last_active': '2026-03-08T23:06:45.033048+0000', 'last_peered': '2026-03-08T23:06:45.033048+0000', 'last_clean': '2026-03-08T23:05:59.324044+0000', 'last_became_active': '2026-03-08T23:06:41.562488+0000', 'last_became_peered': '2026-03-08T23:06:41.562488+0000', 'last_unstale': '2026-03-08T23:06:45.033048+0000', 'last_undegraded': '2026-03-08T23:06:45.033048+0000', 'last_fullsized': '2026-03-08T23:06:45.033048+0000', 'mapping_epoch': 43, 'log_start': "0'0", 'ondisk_log_start': "0'0", 'created': 18, 'last_epoch_clean': 39, 'parent': '0.0', 'parent_split_bits': 0, 'last_scrub': "0'0", 'last_scrub_stamp': '2026-03-08T23:05:53.873544+0000', 'last_deep_scrub': "0'0", 'last_deep_scrub_stamp': '2026-03-08T23:05:53.873544+0000', 'last_clean_scrub_stamp': '2026-03-08T23:05:53.873544+0000', 'objects_scrubbed': 0, 'log_size': 76, 'ondisk_log_size': 76, 'stats_invalid': False, 'dirty_stats_invalid': False, 'omap_stats_invalid': False, 'hitset_stats_invalid': False, 'hitset_bytes_stats_invalid': False, 'pin_stats_invalid': False, 'manifest_stats_invalid': False, 'snaptrimq_len': 0, 'last_scrub_duration': 0, 'scrub_schedule': 'periodic scrub scheduled @ 2026-03-10T03:17:22.236209+0000', 'scrub_duration': 0, 'objects_trimmed': 0, 'snaptrim_duration': 0, 'stat_sum': {'num_bytes': 459280, 'num_objects': 2, 'num_object_clones': 0, 'num_object_copies': 6, 'num_objects_missing_on_primary': 0, 'num_objects_missing': 0, 
'num_objects_degraded': 0, 'num_objects_misplaced': 0, 'num_objects_unfound': 0, 'num_objects_dirty': 2, 'num_whiteouts': 0, 'num_read': 96, 'num_read_kb': 82, 'num_write': 113, 'num_write_kb': 1372, 'num_scrub_errors': 0, 'num_shallow_scrub_errors': 0, 'num_deep_scrub_errors': 0, 'num_objects_recovered': 2, 'num_bytes_recovered': 397840, 'num_keys_recovered': 0, 'num_objects_omap': 0, 'num_objects_hit_set_archive': 0, 'num_bytes_hit_set_archive': 0, 'num_flush': 0, 'num_flush_kb': 0, 'num_evict': 0, 'num_evict_kb': 0, 'num_promote': 0, 'num_flush_mode_high': 0, 'num_flush_mode_low': 0, 'num_evict_mode_some': 0, 'num_evict_mode_full': 0, 'num_objects_pinned': 0, 'num_legacy_snapsets': 0, 'num_large_omap_objects': 0, 'num_objects_manifest': 0, 'num_omap_bytes': 0, 'num_omap_keys': 0, 'num_objects_repaired': 0}, 'up': [7, 0, 6], 'acting': [7, 0, 6], 'avail_no_missing': ['7', '0', '1', '5', '6'], 'object_location_counts': [{'shards': '0,6,7', 'objects': 2}], 'blocked_by': [], 'up_primary': 7, 'acting_primary': 7, 'purged_snaps': []} 2026-03-08T23:06:51.388 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph status --format=json 2026-03-08T23:06:51.534 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:51.902 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:51 vm00 ceph-mon[47668]: from='client.14488 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:51.902 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:51 vm00 ceph-mon[47668]: from='client.14492 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:51.902 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:51.902 INFO:teuthology.orchestra.run.vm00.stdout:{"fsid":"cabe2722-1b42-11f1-9450-0d39870fd3ae","health":{"status":"HEALTH_OK","checks":{},"mutes":[]},"election_epoch":10,"quorum":[0,1],"quorum_names":["vm00","vm08"],"quorum_age":94,"monmap":{"epoch":2,"min_mon_release_name":"quincy","num_mons":2},"osdmap":{"epoch":44,"num_osds":8,"num_up_osds":8,"osd_up_since":1773011199,"num_in_osds":8,"osd_in_since":1773011190,"num_remapped_pgs":0},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":1}],"num_pgs":1,"num_pools":1,"num_objects":2,"data_bytes":459280,"bytes_used":49872896,"bytes_avail":171715264512,"bytes_total":171765137408,"recovering_objects_per_sec":0,"recovering_bytes_per_sec":39778,"recovering_keys_per_sec":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0},"fsmap":{"epoch":1,"by_rank":[],"up:standby":0},"mgrmap":{"available":true,"num_standbys":1,"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"services":{"dashboard":"https://192.168.123.100:8443/","prometheus":"http://192.168.123.100:9283/"}},"servicemap":{"epoch":6,"modified":"2026-03-08T23:06:45.438731+0000","services":{"osd":{"daemons":{"summary":"","2":{"start_epoch":0,"start_stamp":"0.000000","gid":0,"addr":"(unrecognized address family 0)/0","metadata":{},"task_status":{}},"3":{"start_epoch":0,"start_stamp":"0.000000","gid":0,"addr":"(unrecognized address family 0)/0","metadata":{},"task_status":{}},"4":{"start_epoch":0,"start_stamp":"0.000000","gid":0,"addr":"(unrecognized address family 
0)/0","metadata":{},"task_status":{}},"5":{"start_epoch":0,"start_stamp":"0.000000","gid":0,"addr":"(unrecognized address family 0)/0","metadata":{},"task_status":{}},"6":{"start_epoch":0,"start_stamp":"0.000000","gid":0,"addr":"(unrecognized address family 0)/0","metadata":{},"task_status":{}},"7":{"start_epoch":0,"start_stamp":"0.000000","gid":0,"addr":"(unrecognized address family 0)/0","metadata":{},"task_status":{}}}}}},"progress_events":{}} 2026-03-08T23:06:51.972 INFO:tasks.cephadm.ceph_manager.ceph:making progress, resetting timeout 2026-03-08T23:06:51.972 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph pg dump --format=json 2026-03-08T23:06:52.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:51 vm08 ceph-mon[56824]: from='client.14488 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:52.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:51 vm08 ceph-mon[56824]: from='client.14492 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:52.132 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:52.518 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:52.521 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-08T23:06:52.592 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":85,"stamp":"2026-03-08T23:06:51.439565+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":4,"num_osds":8,"num_per_pool_osds":4,"num_per_pool_omap_osds":4,"kb":167739392,"kb_used":48704,"kb_used_data":4608,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690688,"statfs":{"total":171765137408,"available":171715264512,"internally_reserved":0,"allocated":4718592,"data_stored":2622979,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":6,"ap
ply_latency_ms":6,"commit_latency_ns":6000000,"apply_latency_ns":6000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"10.001383"},"pg_stats":[{"pgid":"1.0","version":"20'76","reported_seq":16,"reported_epoch":44,"state":"active+clean","last_fresh":"2026-03-08T23:06:47.627476+0000","last_change":"2026-03-08T23:06:47.627476+0000","last_active":"2026-03-08T23:06:47.627476+0000","last_peered":"2026-03-08T23:06:47.627476+0000","last_clean":"2026-03-08T23:06:47.627476+0000","last_became_active":"2026-03-08T23:06:41.562488+0000","last_became_peered":"2026-03-08T23:06:41.562488+0000","last_unstale":"2026-03-08T23:06:47.627476+0000","last_undegraded":"2026-03-08T23:06:47.627476+0000","last_fullsized":"2026-03-08T23:06:47.627476+0000","mapping_epoch":43,"log_start":"0'0","ondisk_log_start":"0'0","created":18,"last_epoch_clean":44,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_clean_scrub_stamp":"2026-03-08T23:05:53.873544+0000","objects_scrubbed":0,"log_size":76,"ondisk_log_size":76,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-03-10T03:17:22.236209+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":7}],"osd_stats":[{"osd":7,"up_from":42,"seq":180388626436,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6076,"kb_used_data":820,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961348,"statfs":{"total":21470642176,"available":21464420352,"internally_reserved":0,"allocated":839680,"data_stored":576433,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.41999999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.92700000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56299999999999994}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.372}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82099999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86799999999999999}]}]},{"osd":6,"up_from":37,"seq":158913789958,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6080,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961344,"statfs":{"total":21470642176,"available":21464416256,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71199999999999997}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74299999999999999}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71699999999999997}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.754}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.504}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51600000000000001}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60499999999999998}]}]},{"osd":1,"up_from":13,"seq":55834574863,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6256,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961168,"statfs":{"total":21470642176,"available":21464236032,"internally_reserved":0,"allocated":434176,"data_stored":178349,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Sun Mar 8 23:06:45 2026","interfaces":[{"interface":"back","average":{"1min":0.52800000000000002,"5min":0.52800000000000002,"15min":0.52800000000000002},"min":{"1min":0.251,"5min":0.251,"15min":0.251},"max":{"1min":1.022,"5min":1.022,"15min":1.022},"last":0.56100000000000005},{"interface":"front","average":{"1min":0.53300000000000003,"5min":0.53300000000000003,"15min":0.53300000000000003},"min":{"1min":0.23799999999999999,"5min":0.23799999999999999,"15min":0.23799999999999999},"max":{"1min":0.93300000000000005,"5min":0.93300000000000005,"15min":0.93300000000000005},"last":0.44900000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46300000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.502}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60599999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57799999999999996}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.49199999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58899999999999997}]}]},{"osd":0,"up_from":9,"seq":38654705681,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6720,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960704,"statfs":{"total":21470642176,"available":21463760896,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64300000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.50600000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52100000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67900000000000005}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69899999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63700000000000001}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78700000000000003}]}]},{"osd":2,"up_from":17,"seq":73014444045,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6264,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961160,"statfs":{"total":21470642176,"available":21464227840,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74399999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57699999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68600000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70599999999999996}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73299999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.47199999999999998}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69699999999999995}]}]},{"osd":3,"up_from":23,"seq":98784247819,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5752,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961672,"statfs":{"total":21470642176,"available":21464752128,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66900000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.496}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72099999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73299999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81699999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53200000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.91200000000000003}]}]},{"osd":4,"up_from":27,"seq":115964117001,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5812,"kb_used_data":428,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961612,"statfs":{"total":21470642176,"available":21464690688,"internally_reserved":0,"allocated":438272,"data_stored":178593,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51700000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58599999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69599999999999995}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79000000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48999999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77200000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89600000000000002}]}]},{"osd":5,"up_from":32,"seq":137438953480,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5744,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961680,"statfs":{"total":21470642176,"available":21464760320,"internally_reserved":0,"allocated":434176,"data_stored":178292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56299999999999994}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52300000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60499999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59899999999999998}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.499}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57999999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.50900000000000001}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":4,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-08T23:06:52.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:52 vm00 ceph-mon[47668]: from='client.14496 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:52.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:52 vm00 ceph-mon[47668]: pgmap v85: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 39 KiB/s, 0 objects/s recovering 2026-03-08T23:06:52.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:52 vm00 ceph-mon[47668]: from='client.? 
192.168.123.100:0/544271337' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch 2026-03-08T23:06:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:52 vm08 ceph-mon[56824]: from='client.14496 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:52 vm08 ceph-mon[56824]: pgmap v85: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 39 KiB/s, 0 objects/s recovering 2026-03-08T23:06:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:52 vm08 ceph-mon[56824]: from='client.? 192.168.123.100:0/544271337' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch 2026-03-08T23:06:53.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:53 vm00 ceph-mon[47668]: from='client.14504 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:54.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:53 vm08 ceph-mon[56824]: from='client.14504 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:54 vm00 ceph-mon[47668]: pgmap v86: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s, 0 objects/s recovering 2026-03-08T23:06:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:54 vm08 ceph-mon[56824]: pgmap v86: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s, 0 objects/s recovering 2026-03-08T23:06:55.593 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph pg dump --format=json 2026-03-08T23:06:55.744 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:56.070 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:56.073 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-08T23:06:56.131 
INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":87,"stamp":"2026-03-08T23:06:55.440124+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":4,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48704,"kb_used_data":4608,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690688,"statfs":{"total":171765137408,"available":171715264512,"internally_reserved":0,"allocated":4718592,"data_stored":2622979,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":8,"apply_latency_ms":8,"commit_latency_ns":8000000,"apply_latency_ns":8000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"12.001700"},"pg_stats":[{"pgid":"1.0","version":"20'76","reported_seq":16,"reported_epoch":44,"state":"active+clean","last_fresh":"2026-03-08T23:06:47.627476+0000","last_change":"2026-03-08T23:06:47.627476+0000","last
_active":"2026-03-08T23:06:47.627476+0000","last_peered":"2026-03-08T23:06:47.627476+0000","last_clean":"2026-03-08T23:06:47.627476+0000","last_became_active":"2026-03-08T23:06:41.562488+0000","last_became_peered":"2026-03-08T23:06:41.562488+0000","last_unstale":"2026-03-08T23:06:47.627476+0000","last_undegraded":"2026-03-08T23:06:47.627476+0000","last_fullsized":"2026-03-08T23:06:47.627476+0000","mapping_epoch":43,"log_start":"0'0","ondisk_log_start":"0'0","created":18,"last_epoch_clean":44,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-08T23:05:53.873544+0000","last_clean_scrub_stamp":"2026-03-08T23:05:53.873544+0000","objects_scrubbed":0,"log_size":76,"ondisk_log_size":76,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-10T03:17:22.236209+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":96,"num_read_kb":82,"num_write":113,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":76,"ondisk_log_size":76,"up":3,"acting":3,"num_store_stats":7}],"osd_stats":[{"osd":7,"up_from":42,"seq":180388626436,"num_pgs":1,"num_osds":
1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6076,"kb_used_data":820,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961348,"statfs":{"total":21470642176,"available":21464420352,"internally_reserved":0,"allocated":839680,"data_stored":576433,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":2,"apply_latency_ms":2,"commit_latency_ns":2000000,"apply_latency_ns":2000000},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.41999999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.92700000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56299999999999994}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.372}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82099999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86799999999999999}]}]},{"osd":6,"up_from":37,"seq":158913789959,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6080,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961344,"statfs":{"total":21470642176,"available":21464416256,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51600000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63200000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.745}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.80400000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46999999999999997}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48399999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58499999999999996}]}]},{"osd":1,"up_from":13,"seq":55834574864,"num_pgs":1,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6256,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961168,"statfs":{"total":21470642176,"available":21464236032,"internally_reserved":0,"allocated":434176,"data_stored":178349,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":6,"apply_latency_ms":6,"commit_latency_ns":6000000,"apply_latency_ns":6000000},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Sun Mar 8 23:06:45 2026","interfaces":[{"interface":"back","average":{"1min":0.52800000000000002,"5min":0.52800000000000002,"15min":0.52800000000000002},"min":{"1min":0.251,"5min":0.251,"15min":0.251},"max":{"1min":1.022,"5min":1.022,"15min":1.022},"last":0.435},{"interface":"front","average":{"1min":0.53300000000000003,"5min":0.53300000000000003,"15min":0.53300000000000003},"min":{"1min":0.23799999999999999,"5min":0.23799999999999999,"15min":0.23799999999999999},"max":{"1min":0.93300000000000005,"5min":0.93300000000000005,"15min":0.93300000000000005},"last":0.32700000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54300000000000004}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66000000000000003}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.755}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56699999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52300000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71099999999999997}]}]},{"osd":0,"up_from":9,"seq":38654705682,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6720,"kb_used_data":824,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960704,"statfs":{"total":21470642176,"available":21463760896,"internally_reserved":0,"allocated":843776,"data_stored":576748,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Sun Mar 8 23:06:49 2026","interfaces":[{"interface":"back","average":{"1min":0.50800000000000001,"5min":0.50800000000000001,"15min":0.50800000000000001},"min":{"1min":0.20699999999999999,"5min":0.20699999999999999,"15min":0.20699999999999999},"max":{"1min":1.256,"5min":1.256,"15min":1.256},"last":0.42299999999999999},{"interface":"front","average":{"1min":0.48699999999999999,"5min":0.48699999999999999,"15min":0.48699999999999999},"min":{"1min":0.23799999999999999,"5min":0.23799999999999999,"15min":0.23799999999999999},"max":{"1min":0.77800000000000002,"5min":0.77800000000000002,"15min":0.77800000000000002},"last":0.60299999999999998}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58399999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56299999999999994}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48999999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78700000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65700000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71899999999999997}]}]},{"osd":2,"up_from":17,"seq":73014444046,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6264,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961160,"statfs":{"total":21470642176,"available":21464227840,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77200000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59299999999999997}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68999999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67200000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.75900000000000001}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56399999999999995}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70199999999999996}]}]},{"osd":3,"up_from":23,"seq":98784247820,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5752,"kb_used_data":432,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961672,"statfs":{"total":21470642176,"available":21464752128,"internally_reserved":0,"allocated":442368,"data_stored":178908,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51600000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.32200000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52300000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64400000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53700000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71499999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83699999999999997}]}]},{"osd":4,"up_from":27,"seq":115964117002,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5812,"kb_used_data":428,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961612,"statfs":{"total":21470642176,"available":21464690688,"internally_reserved":0,"allocated":438272,"data_stored":178593,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60999999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63100000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64700000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.2350000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.441}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53700000000000003}]}]},{"osd":5,"up_from":32,"seq":137438953480,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5744,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961680,"statfs":{"total":21470642176,"available":21464760320,"internally_reserved":0,"allocated":434176,"data_stored":178292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56299999999999994}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52300000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60499999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59899999999999998}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.499}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57999999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.50900000000000001}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":4,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-08T23:06:56.132 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-08T23:06:56.132 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-08T23:06:56.132 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-08T23:06:56.132 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph health --format=json 2026-03-08T23:06:56.278 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/cabe2722-1b42-11f1-9450-0d39870fd3ae/mon.vm00/config 2026-03-08T23:06:56.629 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:06:56.629 INFO:teuthology.orchestra.run.vm00.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-08T23:06:56.678 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-08T23:06:56.678 INFO:tasks.cephadm:Setup complete, yielding 2026-03-08T23:06:56.678 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-08T23:06:56.680 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm00.local 2026-03-08T23:06:56.680 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- bash -c 'ceph orch status' 2026-03-08T23:06:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:56 vm08 ceph-mon[56824]: pgmap v87: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s, 0 objects/s recovering 2026-03-08T23:06:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:56 vm08 ceph-mon[56824]: from='client.? 
192.168.123.100:0/689821458' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-08T23:06:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:56 vm00 ceph-mon[47668]: pgmap v87: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s, 0 objects/s recovering 2026-03-08T23:06:56.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:56 vm00 ceph-mon[47668]: from='client.? 192.168.123.100:0/689821458' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-08T23:06:57.154 INFO:teuthology.orchestra.run.vm00.stdout:Backend: cephadm 2026-03-08T23:06:57.154 INFO:teuthology.orchestra.run.vm00.stdout:Available: Yes 2026-03-08T23:06:57.154 INFO:teuthology.orchestra.run.vm00.stdout:Paused: No 2026-03-08T23:06:57.219 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- bash -c 'ceph orch ps' 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.vm00 vm00 *:9093,9094 running (87s) 48s ago 2m 18.7M - ba2b418f427c cbd93685d2a3 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:crash.vm00 vm00 running (2m) 48s ago 2m 6953k - 17.2.0 e1d6a67b021e a884e156e3ea 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:crash.vm08 vm08 running (108s) 20s ago 107s 7084k - 17.2.0 e1d6a67b021e fb8891081fff 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:grafana.vm00 vm00 *:3000 running (85s) 48s ago 2m 43.8M - 8.3.5 dad864ee21e9 bcd452151d0c 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:mgr.vm00.pkgtpt vm00 *:9283 running (3m) 48s ago 3m 459M - 17.2.0 e1d6a67b021e 18ea8de306a7 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:mgr.vm08.fufswh vm08 *:8443,9283 running (107s) 20s ago 107s 418M - 17.2.0 e1d6a67b021e 901a8517f8a2 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:mon.vm00 vm00 running (3m) 48s ago 3m 42.8M 2048M 17.2.0 e1d6a67b021e 6ef5be51d7de 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:mon.vm08 vm08 running (105s) 20s ago 105s 39.0M 2048M 17.2.0 e1d6a67b021e 0afd4637552f 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.vm00 vm00 *:9100 running (117s) 48s ago 117s 18.5M - 1dbe0e931976 4520be544895 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.vm08 vm08 *:9100 running (102s) 20s ago 102s 18.6M - 1dbe0e931976 6cbf70a23f8e 2026-03-08T23:06:57.681 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (88s) 48s ago 88s 33.7M 4096M 17.2.0 e1d6a67b021e 99ce569a663b 2026-03-08T23:06:57.682 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (78s) 48s ago 78s 36.7M 4096M 17.2.0 e1d6a67b021e 2909d9f8129a 2026-03-08T23:06:57.682 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (68s) 48s ago 68s 31.7M 4096M 17.2.0 e1d6a67b021e abd8088208b7 2026-03-08T23:06:57.682 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (58s) 48s ago 58s 30.3M 4096M 17.2.0 e1d6a67b021e bbc04b502b55 2026-03-08T23:06:57.682 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (49s) 20s ago 49s 33.5M 4096M 17.2.0 e1d6a67b021e cb51c51c7974 2026-03-08T23:06:57.682 
INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (40s) 20s ago 40s 35.8M 4096M 17.2.0 e1d6a67b021e 4025309b6947 2026-03-08T23:06:57.682 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (32s) 20s ago 31s 34.8M 4096M 17.2.0 e1d6a67b021e a5b2024983c1 2026-03-08T23:06:57.682 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (21s) 20s ago 21s 12.8M 4096M 17.2.0 e1d6a67b021e 65d6ddd1bac4 2026-03-08T23:06:57.682 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.vm00 vm00 *:9095 running (96s) 48s ago 96s 36.7M - 514e6a882f6e 2ba0b285b4d9 2026-03-08T23:06:57.725 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:57 vm00 ceph-mon[47668]: from='client.14508 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:57.727 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- bash -c 'ceph orch ls' 2026-03-08T23:06:58.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:57 vm08 ceph-mon[56824]: from='client.14508 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:06:58.214 INFO:teuthology.orchestra.run.vm00.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-08T23:06:58.214 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager ?:9093,9094 1/1 48s ago 2m count:1 2026-03-08T23:06:58.214 INFO:teuthology.orchestra.run.vm00.stdout:crash 2/2 48s ago 2m * 2026-03-08T23:06:58.215 INFO:teuthology.orchestra.run.vm00.stdout:grafana ?:3000 1/1 48s ago 2m count:1 2026-03-08T23:06:58.215 INFO:teuthology.orchestra.run.vm00.stdout:mgr 2/2 48s ago 2m count:2 2026-03-08T23:06:58.215 INFO:teuthology.orchestra.run.vm00.stdout:mon 2/2 48s ago 2m vm00:192.168.123.100=vm00;vm08:192.168.123.108=vm08;count:2 2026-03-08T23:06:58.215 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter ?:9100 2/2 48s ago 2m * 2026-03-08T23:06:58.215 INFO:teuthology.orchestra.run.vm00.stdout:osd 8 48s ago - 2026-03-08T23:06:58.215 INFO:teuthology.orchestra.run.vm00.stdout:prometheus ?:9095 1/1 48s ago 2m count:1 2026-03-08T23:06:58.286 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- bash -c 'ceph orch host ls' 2026-03-08T23:06:58.748 INFO:teuthology.orchestra.run.vm00.stdout:HOST ADDR LABELS STATUS 2026-03-08T23:06:58.748 INFO:teuthology.orchestra.run.vm00.stdout:vm00 192.168.123.100 2026-03-08T23:06:58.749 INFO:teuthology.orchestra.run.vm00.stdout:vm08 192.168.123.108 2026-03-08T23:06:58.749 INFO:teuthology.orchestra.run.vm00.stdout:2 hosts in cluster 2026-03-08T23:06:58.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:58 vm00 ceph-mon[47668]: from='client.14516 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:58.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:58 vm00 ceph-mon[47668]: pgmap v88: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s, 0 objects/s recovering 2026-03-08T23:06:58.760 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:58 vm00 ceph-mon[47668]: from='client.14520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:58.812 
DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- bash -c 'ceph orch device ls' 2026-03-08T23:06:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:58 vm08 ceph-mon[56824]: from='client.14516 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:58 vm08 ceph-mon[56824]: pgmap v88: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s, 0 objects/s recovering 2026-03-08T23:06:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:58 vm08 ceph-mon[56824]: from='client.14520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:59.279 INFO:teuthology.orchestra.run.vm00.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REJECT REASONS 2026-03-08T23:06:59.279 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdb hdd DWNBRSTVMM00001 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-08T23:06:59.279 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdc hdd DWNBRSTVMM00002 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-08T23:06:59.279 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdd hdd DWNBRSTVMM00003 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-08T23:06:59.279 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vde hdd DWNBRSTVMM00004 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-08T23:06:59.279 INFO:teuthology.orchestra.run.vm00.stdout:vm08 /dev/vdb hdd DWNBRSTVMM08001 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-08T23:06:59.279 INFO:teuthology.orchestra.run.vm00.stdout:vm08 /dev/vdc hdd DWNBRSTVMM08002 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-08T23:06:59.279 INFO:teuthology.orchestra.run.vm00.stdout:vm08 /dev/vdd hdd DWNBRSTVMM08003 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-08T23:06:59.279 INFO:teuthology.orchestra.run.vm00.stdout:vm08 /dev/vde hdd DWNBRSTVMM08004 21.4G Insufficient space (<10 extents) on vgs, LVM detected, locked 2026-03-08T23:06:59.342 INFO:teuthology.run_tasks:Running task vip.exec... 2026-03-08T23:06:59.345 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm00.local 2026-03-08T23:06:59.346 DEBUG:teuthology.orchestra.run.vm00:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-08T23:06:59.374 INFO:teuthology.orchestra.run.vm00.stderr:+ systemctl stop nfs-server 2026-03-08T23:06:59.381 INFO:tasks.vip:Running commands on role host.b host ubuntu@vm08.local 2026-03-08T23:06:59.381 DEBUG:teuthology.orchestra.run.vm08:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-08T23:06:59.408 INFO:teuthology.orchestra.run.vm08.stderr:+ systemctl stop nfs-server 2026-03-08T23:06:59.415 INFO:teuthology.run_tasks:Running task cephadm.shell... 
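(Editor's sketch.) Every cluster command in the cephadm.shell steps above is driven through the bootstrap-node wrapper visible in the DEBUG lines: sudo /home/ubuntu/cephtest/cephadm --image <image> shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid <fsid> -- bash -c '<cmd>'. A minimal Python sketch of that pattern follows; the wrapper path, image, and fsid are copied from this log, the helper name cephadm_shell is hypothetical, and the real teuthology task runs the same command over SSH through its orchestra layer rather than locally.

    # Sketch: run a command inside `cephadm shell` the way the cephadm.shell
    # task does in this log. Assumes local execution on the bootstrap host.
    import subprocess

    CEPHADM = "/home/ubuntu/cephtest/cephadm"                 # from this log
    IMAGE = "quay.io/ceph/ceph:v17.2.0"                        # from this log
    FSID = "cabe2722-1b42-11f1-9450-0d39870fd3ae"              # from this log

    def cephadm_shell(cmd: str) -> str:
        """Run `cmd` in a cephadm shell container and return its stdout."""
        argv = [
            "sudo", CEPHADM, "--image", IMAGE, "shell",
            "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", FSID,
            "--", "bash", "-c", cmd,
        ]
        return subprocess.run(argv, check=True, capture_output=True,
                              text=True).stdout

    # The same read-only survey the task runs above:
    for c in ("ceph orch status", "ceph orch ps", "ceph orch ls",
              "ceph orch host ls", "ceph orch device ls"):
        print(cephadm_shell(c))
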
2026-03-08T23:06:59.417 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm00.local 2026-03-08T23:06:59.417 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- bash -c 'ceph fs volume create foofs' 2026-03-08T23:06:59.701 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:59 vm08 ceph-mon[56824]: from='client.14524 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:59.701 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:06:59 vm08 ceph-mon[56824]: from='client.14528 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:59.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:59 vm00 ceph-mon[47668]: from='client.14524 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:06:59.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:06:59 vm00 ceph-mon[47668]: from='client.14528 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:00 vm08 ceph-mon[56824]: from='client.14532 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:00 vm08 ceph-mon[56824]: pgmap v89: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:07:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:00 vm08 ceph-mon[56824]: from='client.14536 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "foofs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:00 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]: dispatch 2026-03-08T23:07:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:00 vm00 ceph-mon[47668]: from='client.14532 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:00 vm00 ceph-mon[47668]: pgmap v89: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:07:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:00 vm00 ceph-mon[47668]: from='client.14536 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "foofs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:00 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]: dispatch 2026-03-08T23:07:01.831 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-08T23:07:01.834 INFO:tasks.cephadm:Waiting for ceph service mds.foofs to start (timeout 300)... 
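(Editor's sketch.) The cephadm.wait_for_service task that starts next decides readiness by polling `ceph orch ls -f json` and comparing the service's running count against its size; the log below shows mds.foofs going from "0/2" to "2/2" within the 300 s timeout. A rough sketch of that polling loop, assuming the hypothetical cephadm_shell() helper from the previous sketch (the function name wait_for_service and the 1 s interval are illustrative, not the task's actual API):

    # Sketch of a wait_for_service-style poll over `ceph orch ls -f json`.
    import json
    import time

    def wait_for_service(name: str, timeout: int = 300,
                         interval: int = 1) -> None:
        deadline = time.time() + timeout
        while time.time() < deadline:
            services = json.loads(cephadm_shell("ceph orch ls -f json"))
            for svc in services:
                if svc.get("service_name") == name:
                    status = svc.get("status", {})
                    running = status.get("running", 0)
                    size = status.get("size", 0)
                    print(f"{name} has {running}/{size}")
                    if size > 0 and running == size:
                        return
            time.sleep(interval)
        raise TimeoutError(f"service {name} did not start within {timeout}s")

    # wait_for_service("mds.foofs")
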
2026-03-08T23:07:01.834 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch ls -f json 2026-03-08T23:07:02.007 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:01 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]': finished 2026-03-08T23:07:02.007 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:01 vm00 ceph-mon[47668]: osdmap e45: 8 total, 8 up, 8 in 2026-03-08T23:07:02.007 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:01 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.data"}]: dispatch 2026-03-08T23:07:02.007 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:01 vm00 ceph-cabe2722-1b42-11f1-9450-0d39870fd3ae-mon-vm00[47664]: 2026-03-08T23:07:01.737+0000 7ff83353a700 -1 log_channel(cluster) log [ERR] : Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-08T23:07:02.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:01 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]': finished 2026-03-08T23:07:02.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:01 vm08 ceph-mon[56824]: osdmap e45: 8 total, 8 up, 8 in 2026-03-08T23:07:02.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:01 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.data"}]: dispatch 2026-03-08T23:07:02.420 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:07:02.420 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-08T23:04:06.790020Z", "last_refresh": "2026-03-08T23:06:09.393907Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-08T23:04:05.553046Z", "last_refresh": "2026-03-08T23:06:09.393935Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "status": {"created": "2026-03-08T23:04:06.129914Z", "last_refresh": "2026-03-08T23:06:09.393960Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-08T23:07:01.782124Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-08T23:07:01.775858Z", "running": 0, "size": 2}}, {"placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-08T23:04:05.279124Z", "last_refresh": "2026-03-08T23:06:09.393876Z", "running": 2, "size": 2}}, {"placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm08:192.168.123.108=vm08"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-08T23:04:23.377648Z", "last_refresh": "2026-03-08T23:06:09.393770Z", "running": 2, "size": 2}}, {"placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-08T23:04:06.488831Z", 
"last_refresh": "2026-03-08T23:06:09.393984Z", "ports": [9100], "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", "container_image_name": "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", "last_refresh": "2026-03-08T23:06:09.394033Z", "running": 8, "size": 8}, "unmanaged": true}, {"placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-08T23:04:05.824375Z", "last_refresh": "2026-03-08T23:06:09.394009Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-08T23:07:02.506 INFO:tasks.cephadm:mds.foofs has 0/2 2026-03-08T23:07:02.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: pgmap v91: 33 pgs: 5 creating+peering, 1 active+clean, 27 unknown; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.data"}]': finished 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: osdmap e46: 8 total, 8 up, 8 in 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]: dispatch 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: osdmap e47: 8 total, 8 up, 8 in 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]': finished 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: fsmap foofs:0 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: Saving service mds.foofs spec with placement count:2 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm00.sofkpj", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm00.sofkpj", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:02 vm00 ceph-mon[47668]: Deploying daemon mds.foofs.vm00.sofkpj on vm00 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: pgmap v91: 33 pgs: 5 creating+peering, 1 active+clean, 27 unknown; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.data"}]': finished 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: osdmap e46: 8 total, 8 up, 8 in 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]: dispatch 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: osdmap e47: 8 total, 8 up, 8 in 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]': finished 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: fsmap foofs:0 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: Saving service mds.foofs spec with placement count:2 2026-03-08T23:07:03.122 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm00.sofkpj", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm00.sofkpj", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:03.122 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:02 vm08 ceph-mon[56824]: Deploying daemon mds.foofs.vm00.sofkpj on vm00 2026-03-08T23:07:03.506 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch ls -f json 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: from='client.14540 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: osdmap e48: 8 total, 8 up, 8 in 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm08.gnfjzr", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm08.gnfjzr", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: Deploying daemon mds.foofs.vm08.gnfjzr on vm08 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: daemon mds.foofs.vm00.sofkpj assigned to filesystem foofs as rank 0 (now has 1 ranks) 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline) 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds) 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: Cluster is now healthy 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: mds.? [v2:192.168.123.100:6834/3956723533,v1:192.168.123.100:6835/3956723533] up:boot 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: fsmap foofs:1 {0=foofs.vm00.sofkpj=up:creating} 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mds metadata", "who": "foofs.vm00.sofkpj"}]: dispatch 2026-03-08T23:07:04.058 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:03 vm00 ceph-mon[47668]: daemon mds.foofs.vm00.sofkpj is now active in filesystem foofs as rank 0 2026-03-08T23:07:04.058 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:07:04.058 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-08T23:04:06.790020Z", "last_refresh": "2026-03-08T23:06:09.393907Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-08T23:04:05.553046Z", "last_refresh": "2026-03-08T23:06:09.393935Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "status": {"created": "2026-03-08T23:04:06.129914Z", "last_refresh": "2026-03-08T23:06:09.393960Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-08T23:07:01.782124Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-08T23:07:01.775858Z", "running": 0, "size": 2}}, {"placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-08T23:04:05.279124Z", "last_refresh": "2026-03-08T23:06:09.393876Z", "running": 2, "size": 2}}, {"placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm08:192.168.123.108=vm08"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-08T23:04:23.377648Z", 
"last_refresh": "2026-03-08T23:06:09.393770Z", "running": 2, "size": 2}}, {"placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-08T23:04:06.488831Z", "last_refresh": "2026-03-08T23:06:09.393984Z", "ports": [9100], "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", "container_image_name": "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", "last_refresh": "2026-03-08T23:06:09.394033Z", "running": 8, "size": 8}, "unmanaged": true}, {"placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-08T23:04:05.824375Z", "last_refresh": "2026-03-08T23:06:09.394009Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: from='client.14540 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: osdmap e48: 8 total, 8 up, 8 in 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm08.gnfjzr", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm08.gnfjzr", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: Deploying daemon mds.foofs.vm08.gnfjzr on vm08 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: daemon mds.foofs.vm00.sofkpj assigned to filesystem foofs as rank 0 (now has 1 ranks) 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline) 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds) 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: Cluster is now healthy 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: mds.? 
[v2:192.168.123.100:6834/3956723533,v1:192.168.123.100:6835/3956723533] up:boot 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: fsmap foofs:1 {0=foofs.vm00.sofkpj=up:creating} 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mds metadata", "who": "foofs.vm00.sofkpj"}]: dispatch 2026-03-08T23:07:04.070 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:03 vm08 ceph-mon[56824]: daemon mds.foofs.vm00.sofkpj is now active in filesystem foofs as rank 0 2026-03-08T23:07:04.122 INFO:tasks.cephadm:mds.foofs has 0/2 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: pgmap v95: 65 pgs: 9 creating+peering, 6 active+clean, 50 unknown; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: osdmap e49: 8 total, 8 up, 8 in 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: mds.? [v2:192.168.123.100:6834/3956723533,v1:192.168.123.100:6835/3956723533] up:active 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: mds.? 
[v2:192.168.123.108:6832/341536788,v1:192.168.123.108:6833/341536788] up:boot 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: fsmap foofs:1 {0=foofs.vm00.sofkpj=up:active} 1 up:standby 2026-03-08T23:07:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mds metadata", "who": "foofs.vm08.gnfjzr"}]: dispatch 2026-03-08T23:07:04.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: fsmap foofs:1 {0=foofs.vm00.sofkpj=up:active} 1 up:standby 2026-03-08T23:07:04.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:04 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:05.122 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- ceph orch ls -f json 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: pgmap v95: 65 pgs: 9 creating+peering, 6 active+clean, 50 unknown; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: osdmap e49: 8 total, 8 up, 8 in 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: mds.? [v2:192.168.123.100:6834/3956723533,v1:192.168.123.100:6835/3956723533] up:active 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: mds.? 
[v2:192.168.123.108:6832/341536788,v1:192.168.123.108:6833/341536788] up:boot 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: fsmap foofs:1 {0=foofs.vm00.sofkpj=up:active} 1 up:standby 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mds metadata", "who": "foofs.vm08.gnfjzr"}]: dispatch 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: fsmap foofs:1 {0=foofs.vm00.sofkpj=up:active} 1 up:standby 2026-03-08T23:07:05.132 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:04 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:05.594 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-08T23:07:05.594 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-08T23:04:06.790020Z", "last_refresh": "2026-03-08T23:07:05.135975Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-08T23:04:05.553046Z", "last_refresh": "2026-03-08T23:07:04.607983Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "status": {"created": "2026-03-08T23:04:06.129914Z", "last_refresh": "2026-03-08T23:07:05.136029Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-08T23:07:01.782124Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-08T23:07:01.775858Z", "last_refresh": "2026-03-08T23:07:04.608469Z", "running": 2, "size": 2}}, {"placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-08T23:04:05.279124Z", "last_refresh": "2026-03-08T23:07:04.608122Z", "running": 2, "size": 2}}, {"placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm08:192.168.123.108=vm08"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-08T23:04:23.377648Z", "last_refresh": "2026-03-08T23:07:04.608177Z", "running": 2, "size": 2}}, {"placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-08T23:04:06.488831Z", "last_refresh": "2026-03-08T23:07:04.608222Z", "ports": [9100], "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", "container_image_name": "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", "last_refresh": "2026-03-08T23:07:04.608269Z", "running": 8, "size": 8}, "unmanaged": true}, {"placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-08T23:04:05.824375Z", "last_refresh": "2026-03-08T23:07:05.136079Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-08T23:07:05.666 INFO:tasks.cephadm:mds.foofs has 2/2 2026-03-08T23:07:05.666 INFO:teuthology.run_tasks:Running task cephadm.shell... 
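(Editor's sketch.) The next cephadm.shell block must work against both the 17.2.0 bootstrap image and later releases, so each NFS command is written as an alternation ("cmd_a || cmd_b"): the export create first tries --clusterid/--binding, which the 17.2.0 mgr rejects with "Invalid command: Unexpected argument '--clusterid'" (EINVAL), and then succeeds with --cluster-id/--pseudo-path, producing the export JSON with "bind": "/fake" seen further down. A hedged sketch of that fallback pattern, again assuming the hypothetical cephadm_shell() helper from the earlier sketch:

    # Sketch: "try one CLI spelling, fall back to the other", mirroring the
    # `cmd_a || cmd_b` shell idiom used by the task in this log.
    import subprocess

    def run_with_fallback(primary: str, fallback: str) -> str:
        try:
            return cephadm_shell(primary)
        except subprocess.CalledProcessError:
            # e.g. "Unexpected argument '--clusterid'" on the 17.2.0 image
            return cephadm_shell(fallback)

    out = run_with_fallback(
        "ceph nfs export create cephfs --fsname foofs "
        "--clusterid foo --binding /fake",
        "ceph nfs export create cephfs --fsname foofs "
        "--cluster-id foo --pseudo-path /fake",
    )
    print(out)
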
2026-03-08T23:07:05.668 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm00.local 2026-03-08T23:07:05.668 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- bash -c 'ceph nfs cluster create foo --placement=2 || ceph nfs cluster create cephfs foo --placement=2' 2026-03-08T23:07:05.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:05 vm00 ceph-mon[47668]: from='client.24329 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:07:05.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:05.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:05.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:05.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:05.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:05.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:05.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:07:05.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:07:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:05 vm08 ceph-mon[56824]: from='client.24329 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:07:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:06.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:07:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:07:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:06 vm08 ceph-mon[56824]: pgmap v97: 65 pgs: 9 creating+peering, 39 active+clean, 17 unknown; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s wr, 5 op/s 2026-03-08T23:07:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:06 vm08 ceph-mon[56824]: from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:07:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool create", "pool": ".nfs"}]: dispatch 2026-03-08T23:07:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:06 vm00 ceph-mon[47668]: pgmap v97: 65 pgs: 9 creating+peering, 39 active+clean, 17 unknown; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s wr, 5 op/s 2026-03-08T23:07:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:06 vm00 ceph-mon[47668]: from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:07:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool create", "pool": ".nfs"}]: dispatch 2026-03-08T23:07:08.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:07 vm08 ceph-mon[56824]: from='client.14560 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "placement": "2", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:08.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:07 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:08.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:07 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool create", "pool": ".nfs"}]': finished 2026-03-08T23:07:08.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:07 vm08 ceph-mon[56824]: osdmap e50: 8 total, 8 up, 8 in 2026-03-08T23:07:08.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:07 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch 2026-03-08T23:07:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:07 vm00 
ceph-mon[47668]: from='client.14560 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "placement": "2", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool create", "pool": ".nfs"}]': finished 2026-03-08T23:07:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:07 vm00 ceph-mon[47668]: osdmap e50: 8 total, 8 up, 8 in 2026-03-08T23:07:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:07 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch 2026-03-08T23:07:08.211 INFO:teuthology.orchestra.run.vm00.stdout:NFS Cluster Created Successfully 2026-03-08T23:07:08.267 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- bash -c 'ceph nfs export create cephfs --fsname foofs --clusterid foo --binding /fake || ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake' 2026-03-08T23:07:08.847 INFO:teuthology.orchestra.run.vm00.stderr:Invalid command: Unexpected argument '--clusterid' 2026-03-08T23:07:08.847 INFO:teuthology.orchestra.run.vm00.stderr:nfs export create cephfs [] [--readonly] [--client_addr ...] [--squash ] : Create a CephFS export 2026-03-08T23:07:08.848 INFO:teuthology.orchestra.run.vm00.stderr:Error EINVAL: invalid command 2026-03-08T23:07:09.040 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: pgmap v99: 97 pgs: 9 creating+peering, 23 unknown, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 2.3 KiB/s wr, 5 op/s 2026-03-08T23:07:09.040 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished 2026-03-08T23:07:09.040 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: osdmap e51: 8 total, 8 up, 8 in 2026-03-08T23:07:09.040 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:09.040 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:09.040 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:09.040 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:09.040 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:09.040 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.xabagl", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.xabagl", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.xabagl-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.xabagl-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:07:09.041 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:08 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:09.109 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-08T23:07:09.110 INFO:teuthology.orchestra.run.vm00.stdout: "bind": "/fake", 2026-03-08T23:07:09.110 INFO:teuthology.orchestra.run.vm00.stdout: "fs": "foofs", 2026-03-08T23:07:09.110 INFO:teuthology.orchestra.run.vm00.stdout: "path": "/", 2026-03-08T23:07:09.110 INFO:teuthology.orchestra.run.vm00.stdout: 
"cluster": "foo", 2026-03-08T23:07:09.110 INFO:teuthology.orchestra.run.vm00.stdout: "mode": "RW" 2026-03-08T23:07:09.110 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: pgmap v99: 97 pgs: 9 creating+peering, 23 unknown, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 2.3 KiB/s wr, 5 op/s 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: osdmap e51: 8 total, 8 up, 8 in 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.xabagl", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.xabagl", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": 
"config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.xabagl-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm00.xabagl-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:08 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:09.197 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cabe2722-1b42-11f1-9450-0d39870fd3ae -- bash -c 'while ! ceph orch ls | grep nfs | grep 2/2 ; do sleep 1 ; done' 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: Saving service nfs.foo spec with placement count:2 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: Creating key for client.nfs.foo.0.0.vm00.xabagl 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: Rados config object exists: conf-nfs.foo 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: Creating key for client.nfs.foo.0.0.vm00.xabagl-rgw 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: Deploying daemon nfs.foo.0.0.vm00.xabagl on vm00 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]': finished 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 
08 23:07:09 vm08 ceph-mon[56824]: osdmap e52: 8 total, 8 up, 8 in 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm08.jjisdo", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm08.jjisdo", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm08.jjisdo-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm08.jjisdo-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:07:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:09 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: Saving service nfs.foo spec with placement count:2 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: Creating key for client.nfs.foo.0.0.vm00.xabagl 2026-03-08T23:07:09.930 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: Rados config object exists: conf-nfs.foo 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: Creating key for client.nfs.foo.0.0.vm00.xabagl-rgw 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: Deploying daemon nfs.foo.0.0.vm00.xabagl on vm00 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]': finished 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: osdmap e52: 8 total, 8 up, 8 in 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm08.jjisdo", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm08.jjisdo", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-08T23:07:09.930 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm08.jjisdo-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.1.0.vm08.jjisdo-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:07:09.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:09 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: from='client.14582 -' entity='client.admin' cmd=[{"prefix": "nfs export create cephfs", "fsname": "foofs", "cluster_id": "foo", "pseudo_path": "/fake", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: Creating key for client.nfs.foo.1.0.vm08.jjisdo 2026-03-08T23:07:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: Ensuring nfs.foo.1 is in the ganesha grace table 2026-03-08T23:07:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: pgmap v102: 97 pgs: 9 creating+peering, 10 unknown, 78 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 180 B/s rd, 2.6 KiB/s wr, 8 op/s 2026-03-08T23:07:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: Rados config object exists: conf-nfs.foo 2026-03-08T23:07:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: Creating key for client.nfs.foo.1.0.vm08.jjisdo-rgw 2026-03-08T23:07:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: Deploying daemon nfs.foo.1.0.vm08.jjisdo on vm08 2026-03-08T23:07:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: from='client.14600 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:10.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: mgrmap e20: vm00.pkgtpt(active, since 2m), standbys: vm08.fufswh 2026-03-08T23:07:10.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:10.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:10.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:10.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:10 vm08 
ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: from='client.14582 -' entity='client.admin' cmd=[{"prefix": "nfs export create cephfs", "fsname": "foofs", "cluster_id": "foo", "pseudo_path": "/fake", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: Creating key for client.nfs.foo.1.0.vm08.jjisdo 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: Ensuring nfs.foo.1 is in the ganesha grace table 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: pgmap v102: 97 pgs: 9 creating+peering, 10 unknown, 78 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 180 B/s rd, 2.6 KiB/s wr, 8 op/s 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: Rados config object exists: conf-nfs.foo 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: Creating key for client.nfs.foo.1.0.vm08.jjisdo-rgw 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: Deploying daemon nfs.foo.1.0.vm08.jjisdo on vm08 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: from='client.14600 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: mgrmap e20: vm00.pkgtpt(active, since 2m), standbys: vm08.fufswh 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:11.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:10 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:12.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:11 vm00 ceph-mon[47668]: from='client.14608 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:12.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:11 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:11 vm08 ceph-mon[56824]: from='client.14608 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:11 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:12.189 
INFO:teuthology.orchestra.run.vm00.stdout:nfs.foo ?:2049 2/2 0s ago 3s count:2 2026-03-08T23:07:12.234 INFO:teuthology.run_tasks:Running task vip.exec... 2026-03-08T23:07:12.237 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm00.local 2026-03-08T23:07:12.237 DEBUG:teuthology.orchestra.run.vm00:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'mkdir /mnt/foo' 2026-03-08T23:07:12.263 INFO:teuthology.orchestra.run.vm00.stderr:+ mkdir /mnt/foo 2026-03-08T23:07:12.265 DEBUG:teuthology.orchestra.run.vm00:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'while ! mount -t nfs $(hostname):/fake /mnt/foo -o sync ; do sleep 5 ; done' 2026-03-08T23:07:12.332 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:12.333 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:12.540 INFO:teuthology.orchestra.run.vm00.stderr:Created symlink /run/systemd/system/remote-fs.target.wants/rpc-statd.service → /usr/lib/systemd/system/rpc-statd.service. 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: pgmap v103: 97 pgs: 9 creating+peering, 88 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 5 op/s 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: pgmap v103: 97 pgs: 9 creating+peering, 88 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 5 op/s 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:13.737 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:13.737 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:07:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:13 vm08 ceph-mon[56824]: from='client.14612 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:14.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:13 vm00 ceph-mon[47668]: from='client.14612 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:07:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:14 vm08 ceph-mon[56824]: pgmap v104: 97 pgs: 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 325 B/s rd, 651 B/s wr, 3 op/s 2026-03-08T23:07:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:14 vm00 ceph-mon[47668]: pgmap v104: 97 pgs: 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 325 B/s rd, 651 B/s wr, 3 op/s 2026-03-08T23:07:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:16 vm00 ceph-mon[47668]: pgmap v105: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 4.5 KiB/s rd, 3.4 KiB/s wr, 10 op/s 2026-03-08T23:07:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:16 vm08 ceph-mon[56824]: pgmap v105: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 4.5 KiB/s rd, 3.4 KiB/s wr, 10 op/s 2026-03-08T23:07:18.739 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:18.740 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:18.765 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:18.765 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:07:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:18 vm00 ceph-mon[47668]: pgmap v106: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.9 KiB/s rd, 2.9 KiB/s wr, 9 
op/s 2026-03-08T23:07:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:18 vm08 ceph-mon[56824]: pgmap v106: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.9 KiB/s rd, 2.9 KiB/s wr, 9 op/s 2026-03-08T23:07:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:20 vm00 ceph-mon[47668]: pgmap v107: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 2.4 KiB/s wr, 6 op/s 2026-03-08T23:07:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:20 vm08 ceph-mon[56824]: pgmap v107: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 2.4 KiB/s wr, 6 op/s 2026-03-08T23:07:23.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:22 vm00 ceph-mon[47668]: pgmap v108: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 2.9 KiB/s rd, 2.2 KiB/s wr, 5 op/s 2026-03-08T23:07:23.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:22 vm08 ceph-mon[56824]: pgmap v108: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 2.9 KiB/s rd, 2.2 KiB/s wr, 5 op/s 2026-03-08T23:07:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:07:23.767 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:23.767 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:23.793 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:23.794 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:07:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:24 vm08 ceph-mon[56824]: pgmap v109: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 2.8 KiB/s rd, 2.1 KiB/s wr, 5 op/s 2026-03-08T23:07:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:24 vm00 ceph-mon[47668]: pgmap v109: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 2.8 KiB/s rd, 2.1 KiB/s wr, 5 op/s 2026-03-08T23:07:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:26 vm08 ceph-mon[56824]: pgmap v110: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s rd, 2.2 KiB/s wr, 5 op/s 2026-03-08T23:07:27.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:26 vm00 ceph-mon[47668]: pgmap v110: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s rd, 2.2 KiB/s wr, 5 op/s 2026-03-08T23:07:28.795 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:28.796 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:28.822 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:28.823 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:07:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:28 vm08 ceph-mon[56824]: pgmap v111: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:28 vm00 ceph-mon[47668]: pgmap v111: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 
170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:30 vm08 ceph-mon[56824]: pgmap v112: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:30 vm00 ceph-mon[47668]: pgmap v112: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:32 vm08 ceph-mon[56824]: pgmap v113: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:32 vm00 ceph-mon[47668]: pgmap v113: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:33.824 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:33.825 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:33.852 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:33.853 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:07:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:35 vm08 ceph-mon[56824]: pgmap v114: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:34 vm00 ceph-mon[47668]: pgmap v114: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:37 vm08 ceph-mon[56824]: pgmap v115: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:37.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:37 vm00 ceph-mon[47668]: pgmap v115: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:38.854 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:38.855 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:38.882 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:38.883 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:07:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:39 vm08 ceph-mon[56824]: pgmap v116: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:39 vm00 ceph-mon[47668]: pgmap v116: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:40.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:40 vm08 ceph-mon[56824]: pgmap v117: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:40.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:40 vm00 ceph-mon[47668]: pgmap v117: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:42 vm08 ceph-mon[56824]: 
pgmap v118: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:42.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:42 vm00 ceph-mon[47668]: pgmap v118: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:43.884 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:43.885 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:43.911 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:43.912 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:07:44.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:44 vm08 ceph-mon[56824]: pgmap v119: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:44.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:44 vm00 ceph-mon[47668]: pgmap v119: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:46 vm08 ceph-mon[56824]: pgmap v120: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:46 vm00 ceph-mon[47668]: pgmap v120: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:48.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:48 vm08 ceph-mon[56824]: pgmap v121: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:48.913 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:48.914 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:48.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:48 vm00 ceph-mon[47668]: pgmap v121: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:48.940 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:48.940 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:07:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:50 vm08 ceph-mon[56824]: pgmap v122: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:50 vm00 ceph-mon[47668]: pgmap v122: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:52.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:52 vm08 ceph-mon[56824]: pgmap v123: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:52.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:52 vm00 ceph-mon[47668]: pgmap v123: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:53.942 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:53.942 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:53.968 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:53.968 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:07:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:54 vm08 ceph-mon[56824]: pgmap v124: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:54 vm00 ceph-mon[47668]: pgmap v124: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:56 vm08 ceph-mon[56824]: pgmap v125: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:56 vm00 ceph-mon[47668]: pgmap v125: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:07:58.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:07:58 vm08 ceph-mon[56824]: pgmap v126: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:58.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:07:58 vm00 ceph-mon[47668]: pgmap v126: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:07:58.970 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:07:58.970 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:07:59.011 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:07:59.012 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:00.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:00 vm08 ceph-mon[56824]: pgmap v127: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:00.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:00 vm00 ceph-mon[47668]: pgmap v127: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:02.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:02 vm08 ceph-mon[56824]: pgmap v128: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:02.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:02 vm00 ceph-mon[47668]: pgmap v128: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:04.013 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:04.014 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:04.039 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:04.039 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:04 vm08 ceph-mon[56824]: pgmap v129: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:04.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:04 vm00 ceph-mon[47668]: pgmap v129: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 
170 B/s wr, 0 op/s 2026-03-08T23:08:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1b", "id": [7, 2]}]: dispatch 2026-03-08T23:08:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.4", "id": [1, 0]}]: dispatch 2026-03-08T23:08:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch 2026-03-08T23:08:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch 2026-03-08T23:08:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1b", "id": [3, 5]}]: dispatch 2026-03-08T23:08:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-08T23:08:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:08:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:08:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1b", "id": [7, 2]}]: dispatch 2026-03-08T23:08:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.4", "id": [1, 0]}]: dispatch 2026-03-08T23:08:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch 2026-03-08T23:08:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch 2026-03-08T23:08:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1b", "id": [3, 5]}]: 
dispatch 2026-03-08T23:08:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-08T23:08:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:08:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:08:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:06 vm08 ceph-mon[56824]: pgmap v130: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1b", "id": [7, 2]}]': finished 2026-03-08T23:08:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.4", "id": [1, 0]}]': finished 2026-03-08T23:08:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]': finished 2026-03-08T23:08:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]': finished 2026-03-08T23:08:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1b", "id": [3, 5]}]': finished 2026-03-08T23:08:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished 2026-03-08T23:08:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:06 vm08 ceph-mon[56824]: osdmap e53: 8 total, 8 up, 8 in 2026-03-08T23:08:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:06 vm00 ceph-mon[47668]: pgmap v130: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1b", "id": [7, 2]}]': finished 2026-03-08T23:08:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", 
"format": "json", "pgid": "4.4", "id": [1, 0]}]': finished 2026-03-08T23:08:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]': finished 2026-03-08T23:08:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]': finished 2026-03-08T23:08:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1b", "id": [3, 5]}]': finished 2026-03-08T23:08:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished 2026-03-08T23:08:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:06 vm00 ceph-mon[47668]: osdmap e53: 8 total, 8 up, 8 in 2026-03-08T23:08:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:07 vm00 ceph-mon[47668]: osdmap e54: 8 total, 8 up, 8 in 2026-03-08T23:08:08.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:07 vm08 ceph-mon[56824]: osdmap e54: 8 total, 8 up, 8 in 2026-03-08T23:08:08.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:08 vm00 ceph-mon[47668]: pgmap v133: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:09.041 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:09.041 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:09.074 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:09.075 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:08 vm08 ceph-mon[56824]: pgmap v133: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:11.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:10 vm08 ceph-mon[56824]: pgmap v134: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 127 B/s wr, 0 op/s 2026-03-08T23:08:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:10 vm00 ceph-mon[47668]: pgmap v134: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 127 B/s wr, 0 op/s 2026-03-08T23:08:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:12 vm00 ceph-mon[47668]: pgmap v135: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:08:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:08:13.087 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:08:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:08:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:08:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:12 vm08 ceph-mon[56824]: pgmap v135: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:08:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:08:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:08:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:08:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:08:14.076 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:14.077 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:14.102 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:14.103 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:14 vm08 ceph-mon[56824]: pgmap v136: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 127 B/s wr, 0 op/s 2026-03-08T23:08:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:14 vm00 ceph-mon[47668]: pgmap v136: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 127 B/s wr, 0 op/s 2026-03-08T23:08:17.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:16 vm08 ceph-mon[56824]: pgmap v137: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 208 B/s rd, 208 B/s wr, 0 op/s 2026-03-08T23:08:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:16 vm00 ceph-mon[47668]: pgmap v137: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 208 B/s rd, 208 B/s wr, 0 op/s 2026-03-08T23:08:19.104 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:19.105 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:18 vm08 ceph-mon[56824]: pgmap v138: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 189 B/s rd, 189 B/s wr, 0 op/s 2026-03-08T23:08:19.130 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:19.131 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:18 vm00 ceph-mon[47668]: pgmap v138: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 189 B/s rd, 189 B/s wr, 0 op/s 2026-03-08T23:08:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:20 vm08 ceph-mon[56824]: pgmap v139: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:20 vm00 ceph-mon[47668]: pgmap v139: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:22 vm00 ceph-mon[47668]: pgmap v140: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:22 vm08 ceph-mon[56824]: pgmap v140: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:24.132 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:24.132 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:24.158 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:24.158 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:24 vm08 ceph-mon[56824]: pgmap v141: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:24 vm00 ceph-mon[47668]: pgmap v141: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:27.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:26 vm08 ceph-mon[56824]: pgmap v142: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:26 vm00 ceph-mon[47668]: pgmap v142: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:28 vm08 ceph-mon[56824]: pgmap v143: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:29.160 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:29.160 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:28 vm00 ceph-mon[47668]: pgmap v143: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:29.186 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:29.186 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:30 vm08 ceph-mon[56824]: pgmap v144: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 
170 B/s wr, 0 op/s 2026-03-08T23:08:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:30 vm00 ceph-mon[47668]: pgmap v144: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:33.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:32 vm00 ceph-mon[47668]: pgmap v145: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:32 vm08 ceph-mon[56824]: pgmap v145: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:34.188 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:34.188 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:34.214 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:34.214 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:34 vm08 ceph-mon[56824]: pgmap v146: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:34 vm00 ceph-mon[47668]: pgmap v146: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:37.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:36 vm08 ceph-mon[56824]: pgmap v147: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:36 vm00 ceph-mon[47668]: pgmap v147: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:38 vm08 ceph-mon[56824]: pgmap v148: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:38 vm00 ceph-mon[47668]: pgmap v148: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:39.216 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:39.217 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:39.243 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:39.244 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:40 vm08 ceph-mon[56824]: pgmap v149: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:40 vm00 ceph-mon[47668]: pgmap v149: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:43.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:42 vm00 ceph-mon[47668]: pgmap v150: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:42 vm08 ceph-mon[56824]: pgmap v150: 
97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:44.245 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:44.245 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:44.271 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:44.272 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:44 vm00 ceph-mon[47668]: pgmap v151: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:44 vm08 ceph-mon[56824]: pgmap v151: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:46 vm00 ceph-mon[47668]: pgmap v152: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:46 vm08 ceph-mon[56824]: pgmap v152: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:48 vm00 ceph-mon[47668]: pgmap v153: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:49.273 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:49.274 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:49.299 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:49.300 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:48 vm08 ceph-mon[56824]: pgmap v153: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:50 vm00 ceph-mon[47668]: pgmap v154: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:50 vm08 ceph-mon[56824]: pgmap v154: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:53.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:52 vm00 ceph-mon[47668]: pgmap v155: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:52 vm08 ceph-mon[56824]: pgmap v155: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:54.301 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:54.301 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:54.326 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:54.327 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:54 vm00 ceph-mon[47668]: pgmap 
v156: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:54 vm08 ceph-mon[56824]: pgmap v156: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:57.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:56 vm08 ceph-mon[56824]: pgmap v157: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:56 vm00 ceph-mon[47668]: pgmap v157: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:08:59.328 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:08:59.328 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:08:59.353 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:08:59.354 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:08:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:08:58 vm08 ceph-mon[56824]: pgmap v158: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:08:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:08:58 vm00 ceph-mon[47668]: pgmap v158: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:00 vm08 ceph-mon[56824]: pgmap v159: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:00 vm00 ceph-mon[47668]: pgmap v159: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:02.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:02 vm08 ceph-mon[56824]: pgmap v160: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:02.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:02 vm00 ceph-mon[47668]: pgmap v160: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:04.355 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:04.356 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:04.385 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:04.386 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:04.831 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:04 vm00 ceph-mon[47668]: pgmap v161: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:04 vm08 ceph-mon[56824]: pgmap v161: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:09:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:09:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:09:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:09:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:06 vm08 ceph-mon[56824]: pgmap v162: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:06 vm00 ceph-mon[47668]: pgmap v162: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:08.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:08 vm08 ceph-mon[56824]: pgmap v163: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:08.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:08 vm00 ceph-mon[47668]: pgmap v163: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:09.387 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:09.388 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:09.413 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:09.413 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:10 vm08 ceph-mon[56824]: pgmap v164: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:10.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:10 vm00 ceph-mon[47668]: pgmap v164: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:12.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:12 vm08 ceph-mon[56824]: pgmap v165: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:12.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:09:12.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:09:12.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-08T23:09:12.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:12 vm00 ceph-mon[47668]: pgmap v165: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:12.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:09:12.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:09:12.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:09:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:09:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:09:13.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:09:13.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:09:14.415 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:14.415 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:14.440 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:14.441 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:14.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:14 vm08 ceph-mon[56824]: pgmap v166: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:14.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:14 vm00 ceph-mon[47668]: pgmap v166: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:16 vm08 ceph-mon[56824]: pgmap v167: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:16.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:16 vm00 ceph-mon[47668]: pgmap v167: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:18.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:18 vm08 ceph-mon[56824]: pgmap v168: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:18.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:18 vm00 ceph-mon[47668]: pgmap v168: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:19.442 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:19.442 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 
2026-03-08T23:09:19.469 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:19.469 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:20.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:20 vm08 ceph-mon[56824]: pgmap v169: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:20.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:20 vm00 ceph-mon[47668]: pgmap v169: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:22.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:22 vm00 ceph-mon[47668]: pgmap v170: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:22 vm08 ceph-mon[56824]: pgmap v170: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:24.471 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:24.471 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:24.495 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:24.496 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:24.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:24 vm00 ceph-mon[47668]: pgmap v171: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:24 vm08 ceph-mon[56824]: pgmap v171: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:26.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:26 vm08 ceph-mon[56824]: pgmap v172: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:26.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:26 vm00 ceph-mon[47668]: pgmap v172: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:28.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:28 vm00 ceph-mon[47668]: pgmap v173: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:28 vm08 ceph-mon[56824]: pgmap v173: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:29.497 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:29.498 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:29.524 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:29.525 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:30.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:30 vm00 ceph-mon[47668]: pgmap v174: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:30 vm08 ceph-mon[56824]: pgmap v174: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 
GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:32 vm00 ceph-mon[47668]: pgmap v175: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:32 vm08 ceph-mon[56824]: pgmap v175: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:34.526 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:34.526 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:34.551 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:34.552 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:34.831 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:34 vm00 ceph-mon[47668]: pgmap v176: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:34.929 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:34 vm08 ceph-mon[56824]: pgmap v176: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:36 vm08 ceph-mon[56824]: pgmap v177: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:36 vm00 ceph-mon[47668]: pgmap v177: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:38 vm08 ceph-mon[56824]: pgmap v178: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:38 vm00 ceph-mon[47668]: pgmap v178: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:39.553 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:39.553 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:39.578 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:39.582 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:40 vm08 ceph-mon[56824]: pgmap v179: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:40 vm00 ceph-mon[47668]: pgmap v179: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:43.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:42 vm00 ceph-mon[47668]: pgmap v180: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:42 vm08 ceph-mon[56824]: pgmap v180: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:44.583 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 
2026-03-08T23:09:44.584 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:44.610 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:44.611 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:44 vm08 ceph-mon[56824]: pgmap v181: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:44 vm00 ceph-mon[47668]: pgmap v181: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:46 vm08 ceph-mon[56824]: pgmap v182: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:46 vm00 ceph-mon[47668]: pgmap v182: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:48 vm08 ceph-mon[56824]: pgmap v183: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:48 vm00 ceph-mon[47668]: pgmap v183: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:49.612 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:49.612 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:49.640 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:49.640 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:50 vm08 ceph-mon[56824]: pgmap v184: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:50 vm00 ceph-mon[47668]: pgmap v184: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:52 vm00 ceph-mon[47668]: pgmap v185: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:52 vm08 ceph-mon[56824]: pgmap v185: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:54.642 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:54.642 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:54.667 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:54.668 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:09:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:54 vm08 ceph-mon[56824]: pgmap v186: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:09:54 vm00 ceph-mon[47668]: pgmap v186: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:56 vm08 ceph-mon[56824]: pgmap v187: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:56 vm00 ceph-mon[47668]: pgmap v187: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:09:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:09:58 vm08 ceph-mon[56824]: pgmap v188: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:09:58 vm00 ceph-mon[47668]: pgmap v188: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:09:59.669 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:09:59.670 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:09:59.695 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:09:59.696 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:00 vm08 ceph-mon[56824]: pgmap v189: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:00 vm08 ceph-mon[56824]: overall HEALTH_OK 2026-03-08T23:10:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:00 vm00 ceph-mon[47668]: pgmap v189: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:00 vm00 ceph-mon[47668]: overall HEALTH_OK 2026-03-08T23:10:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:02 vm08 ceph-mon[56824]: pgmap v190: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:02 vm00 ceph-mon[47668]: pgmap v190: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:04.697 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:04.697 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:04.724 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:04.725 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:04 vm08 ceph-mon[56824]: pgmap v191: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:04 vm00 ceph-mon[47668]: pgmap v191: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:10:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:10:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:10:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:10:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:06 vm08 ceph-mon[56824]: pgmap v192: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:06 vm00 ceph-mon[47668]: pgmap v192: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:08 vm08 ceph-mon[56824]: pgmap v193: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:08 vm00 ceph-mon[47668]: pgmap v193: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:09.726 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:09.726 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:09.751 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:09.752 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:10 vm08 ceph-mon[56824]: pgmap v194: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:10 vm00 ceph-mon[47668]: pgmap v194: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:12 vm08 ceph-mon[56824]: pgmap v195: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:10:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:10:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:10:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:10:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:12 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:10:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:12 vm00 ceph-mon[47668]: pgmap v195: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:10:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:10:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:10:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:10:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:12 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:10:14.753 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:14.754 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:14.781 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:14.781 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:14 vm08 ceph-mon[56824]: pgmap v196: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:14 vm00 ceph-mon[47668]: pgmap v196: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:17 vm08 ceph-mon[56824]: pgmap v197: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:17 vm00 ceph-mon[47668]: pgmap v197: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:19 vm08 ceph-mon[56824]: pgmap v198: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:19 vm00 ceph-mon[47668]: pgmap v198: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:19.783 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:19.783 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t 
nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:19.809 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:19.810 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:21 vm08 ceph-mon[56824]: pgmap v199: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:21 vm00 ceph-mon[47668]: pgmap v199: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:23 vm08 ceph-mon[56824]: pgmap v200: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:23 vm00 ceph-mon[47668]: pgmap v200: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:24.812 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:24.812 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:24.839 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:24.840 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:25 vm08 ceph-mon[56824]: pgmap v201: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:25 vm00 ceph-mon[47668]: pgmap v201: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:27 vm08 ceph-mon[56824]: pgmap v202: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:27 vm00 ceph-mon[47668]: pgmap v202: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:28.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:28 vm08 ceph-mon[56824]: pgmap v203: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:28.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:28 vm00 ceph-mon[47668]: pgmap v203: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:29.841 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:29.841 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:29.867 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:29.868 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:30.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:30 vm08 ceph-mon[56824]: pgmap v204: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:30.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:30 vm00 ceph-mon[47668]: pgmap v204: 97 pgs: 97 active+clean; 453 KiB 
data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:32 vm08 ceph-mon[56824]: pgmap v205: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:32 vm00 ceph-mon[47668]: pgmap v205: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:34.869 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:34.869 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:34.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:34 vm08 ceph-mon[56824]: pgmap v206: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:34.898 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:34.899 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:34.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:34 vm00 ceph-mon[47668]: pgmap v206: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:36.860 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:36 vm00 ceph-mon[47668]: pgmap v207: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:36 vm08 ceph-mon[56824]: pgmap v207: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:38.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:38 vm08 ceph-mon[56824]: pgmap v208: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:38.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:38 vm00 ceph-mon[47668]: pgmap v208: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:39.900 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:39.901 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:39.927 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:39.927 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:40 vm08 ceph-mon[56824]: pgmap v209: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:40.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:40 vm00 ceph-mon[47668]: pgmap v209: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:42 vm08 ceph-mon[56824]: pgmap v210: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:42.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:42 vm00 ceph-mon[47668]: pgmap v210: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:44.877 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:44 vm08 ceph-mon[56824]: pgmap v211: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:44.929 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:44.929 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:44.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:44 vm00 ceph-mon[47668]: pgmap v211: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:44.955 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:44.955 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:46 vm08 ceph-mon[56824]: pgmap v212: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:46 vm00 ceph-mon[47668]: pgmap v212: 97 pgs: 97 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:48.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:48 vm08 ceph-mon[56824]: pgmap v213: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:48.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:48 vm00 ceph-mon[47668]: pgmap v213: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:49.956 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:49.957 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:49.984 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:49.984 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:50 vm08 ceph-mon[56824]: pgmap v214: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:50 vm00 ceph-mon[47668]: pgmap v214: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:52.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:52 vm08 ceph-mon[56824]: pgmap v215: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:52.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:52 vm00 ceph-mon[47668]: pgmap v215: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:54 vm08 ceph-mon[56824]: pgmap v216: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:54 vm00 ceph-mon[47668]: pgmap v216: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:54.985 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:10:54.986 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs 
vm00:/fake /mnt/foo -o sync 2026-03-08T23:10:55.011 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:10:55.011 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:10:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:56 vm08 ceph-mon[56824]: pgmap v217: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:56 vm00 ceph-mon[47668]: pgmap v217: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:10:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:10:58 vm08 ceph-mon[56824]: pgmap v218: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:10:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:10:58 vm00 ceph-mon[47668]: pgmap v218: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:00.013 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:00.013 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:00.039 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:00.040 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:00 vm08 ceph-mon[56824]: pgmap v219: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:00 vm00 ceph-mon[47668]: pgmap v219: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:03.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:02 vm00 ceph-mon[47668]: pgmap v220: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:02 vm08 ceph-mon[56824]: pgmap v220: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:05.041 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:05.042 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:05.066 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:05.067 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:04 vm08 ceph-mon[56824]: pgmap v221: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:04 vm00 ceph-mon[47668]: pgmap v221: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:11:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:05 vm08 
ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:11:06.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:11:06.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:11:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:06 vm08 ceph-mon[56824]: pgmap v222: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:06 vm00 ceph-mon[47668]: pgmap v222: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:08 vm08 ceph-mon[56824]: pgmap v223: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:08 vm00 ceph-mon[47668]: pgmap v223: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:10.068 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:10.069 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:10.097 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:10.097 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:11.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:10 vm08 ceph-mon[56824]: pgmap v224: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:10 vm00 ceph-mon[47668]: pgmap v224: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:13.041 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:12 vm08 ceph-mon[56824]: pgmap v225: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:13.049 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:12 vm00 ceph-mon[47668]: pgmap v225: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:11:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:11:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:11:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:11:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:11:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:11:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:11:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:11:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:11:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:11:15.099 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:15.099 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:15.125 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:15.126 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:14 vm08 ceph-mon[56824]: pgmap v226: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:14 vm00 ceph-mon[47668]: pgmap v226: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:17.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:16 vm08 ceph-mon[56824]: pgmap v227: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:16 vm00 ceph-mon[47668]: pgmap v227: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:18 vm08 ceph-mon[56824]: pgmap v228: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:18 vm00 ceph-mon[47668]: pgmap v228: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:20.127 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:20.128 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:20.154 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:20.155 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:20 vm08 ceph-mon[56824]: pgmap v229: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:20 vm00 ceph-mon[47668]: pgmap v229: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:22 vm00 ceph-mon[47668]: pgmap v230: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:22 vm08 ceph-mon[56824]: pgmap v230: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:24 vm08 ceph-mon[56824]: pgmap v231: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:25.156 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:25.157 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:24 vm00 ceph-mon[47668]: pgmap v231: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:25.351 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:25.351 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:27.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:26 vm08 ceph-mon[56824]: pgmap v232: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:26 vm00 ceph-mon[47668]: pgmap v232: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:28 vm08 ceph-mon[56824]: pgmap v233: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:11:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:28 vm00 ceph-mon[47668]: pgmap v233: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:11:30.353 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:30.354 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:30.392 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:30.392 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:30 vm08 ceph-mon[56824]: pgmap v234: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:11:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:30 vm00 ceph-mon[47668]: pgmap v234: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:11:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:32 vm08 
ceph-mon[56824]: pgmap v235: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:33.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:32 vm00 ceph-mon[47668]: pgmap v235: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:34 vm00 ceph-mon[47668]: pgmap v236: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:11:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:34 vm08 ceph-mon[56824]: pgmap v236: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:11:35.394 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:35.395 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:35.421 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:35.422 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:37.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:36 vm00 ceph-mon[47668]: pgmap v237: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:36 vm08 ceph-mon[56824]: pgmap v237: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:38 vm00 ceph-mon[47668]: pgmap v238: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:38 vm08 ceph-mon[56824]: pgmap v238: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:40.423 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:40.424 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:40.451 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:40.452 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:40 vm08 ceph-mon[56824]: pgmap v239: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:40 vm00 ceph-mon[47668]: pgmap v239: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:42 vm08 ceph-mon[56824]: pgmap v240: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:42 vm00 ceph-mon[47668]: pgmap v240: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:44 vm08 ceph-mon[56824]: pgmap v241: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-08T23:11:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:44 vm00 ceph-mon[47668]: pgmap v241: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:45.453 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:45.454 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:45.479 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:45.479 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:46 vm08 ceph-mon[56824]: pgmap v242: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:46 vm00 ceph-mon[47668]: pgmap v242: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:48 vm08 ceph-mon[56824]: pgmap v243: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:48 vm00 ceph-mon[47668]: pgmap v243: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:50.481 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:50.481 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:50.508 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:50.509 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:11:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:50 vm08 ceph-mon[56824]: pgmap v244: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:50 vm00 ceph-mon[47668]: pgmap v244: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:52 vm08 ceph-mon[56824]: pgmap v245: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:52 vm00 ceph-mon[47668]: pgmap v245: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:54 vm00 ceph-mon[47668]: pgmap v246: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:54 vm08 ceph-mon[56824]: pgmap v246: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:55.510 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:11:55.511 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:11:55.584 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:11:55.585 INFO:teuthology.orchestra.run.vm00.stderr:+ 
sleep 5 2026-03-08T23:11:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:56 vm08 ceph-mon[56824]: pgmap v247: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:56 vm00 ceph-mon[47668]: pgmap v247: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:11:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:11:58 vm08 ceph-mon[56824]: pgmap v248: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:11:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:11:58 vm00 ceph-mon[47668]: pgmap v248: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:00.586 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:00.586 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:00.612 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:00.612 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:00 vm00 ceph-mon[47668]: pgmap v249: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:00 vm08 ceph-mon[56824]: pgmap v249: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:02 vm08 ceph-mon[56824]: pgmap v250: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:02 vm00 ceph-mon[47668]: pgmap v250: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:04 vm08 ceph-mon[56824]: pgmap v251: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:04 vm00 ceph-mon[47668]: pgmap v251: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:05.613 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:05.614 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:05.677 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:05.678 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:12:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 
2026-03-08T23:12:06.331 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:12:06.331 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:12:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:07 vm08 ceph-mon[56824]: pgmap v252: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:07 vm00 ceph-mon[47668]: pgmap v252: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:08.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:08 vm08 ceph-mon[56824]: pgmap v253: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:08 vm00 ceph-mon[47668]: pgmap v253: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:10.679 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:10.680 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:10.705 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:10.706 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:10.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:10 vm00 ceph-mon[47668]: pgmap v254: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:11.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:10 vm08 ceph-mon[56824]: pgmap v254: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:12.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:12 vm00 ceph-mon[47668]: pgmap v255: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:13.089 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:12 vm08 ceph-mon[56824]: pgmap v255: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:12:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:12:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:12:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:13 vm00 ceph-mon[47668]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:12:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:12:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:12:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:12:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:12:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:12:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:12:14.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:14 vm00 ceph-mon[47668]: pgmap v256: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:14 vm08 ceph-mon[56824]: pgmap v256: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:15.707 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:15.708 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:15.733 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:15.734 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:16 vm08 ceph-mon[56824]: pgmap v257: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:16.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:16 vm00 ceph-mon[47668]: pgmap v257: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:18 vm08 ceph-mon[56824]: pgmap v258: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:18 vm00 ceph-mon[47668]: pgmap v258: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:20.735 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:20.736 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:20.761 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:20.762 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:20 vm08 ceph-mon[56824]: pgmap v259: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 
B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:20 vm00 ceph-mon[47668]: pgmap v259: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:22 vm00 ceph-mon[47668]: pgmap v260: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:22 vm08 ceph-mon[56824]: pgmap v260: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:24 vm00 ceph-mon[47668]: pgmap v261: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:24 vm08 ceph-mon[56824]: pgmap v261: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:25.763 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:25.763 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:25.788 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:25.789 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:26 vm00 ceph-mon[47668]: pgmap v262: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:26 vm08 ceph-mon[56824]: pgmap v262: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:28 vm08 ceph-mon[56824]: pgmap v263: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:29.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:28 vm00 ceph-mon[47668]: pgmap v263: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:30.790 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:30.791 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:30.816 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:30.816 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:30 vm08 ceph-mon[56824]: pgmap v264: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:30 vm00 ceph-mon[47668]: pgmap v264: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:32 vm08 ceph-mon[56824]: pgmap v265: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:32 vm00 ceph-mon[47668]: pgmap 
v265: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:35 vm08 ceph-mon[56824]: pgmap v266: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:35 vm00 ceph-mon[47668]: pgmap v266: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:35.818 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:35.818 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:35.845 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:35.846 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:37 vm08 ceph-mon[56824]: pgmap v267: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:37.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:37 vm00 ceph-mon[47668]: pgmap v267: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:39 vm08 ceph-mon[56824]: pgmap v268: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:39 vm00 ceph-mon[47668]: pgmap v268: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:40.847 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:40.848 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:40.876 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:40.877 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:41 vm08 ceph-mon[56824]: pgmap v269: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:41 vm00 ceph-mon[47668]: pgmap v269: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:43 vm08 ceph-mon[56824]: pgmap v270: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:43 vm00 ceph-mon[47668]: pgmap v270: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:45 vm08 ceph-mon[56824]: pgmap v271: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:45 vm00 ceph-mon[47668]: pgmap v271: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
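[editor's note] The recurring stderr sequence above — "++ hostname", "+ mount -t nfs vm00:/fake /mnt/foo -o sync", "mount.nfs: mount system call failed", "+ sleep 5" — is the shell xtrace of a retry loop of roughly the following shape (a sketch reconstructed from the trace alone; the actual task script may differ):

    # keep retrying the NFS mount until it succeeds, backing off 5s per attempt
    while ! mount -t nfs "$(hostname):/fake" /mnt/foo -o sync ; do
        sleep 5
    done

The "++ hostname" line is the expansion of the $(hostname) command substitution, and each failed attempt prints the mount.nfs error and sleeps 5 seconds, which is why the same four stderr lines recur at ~5-second intervals for the remainder of this excerpt while the pgmap status entries continue in between.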
2026-03-08T23:12:45.878 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:45.879 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:45.904 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:45.904 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:47 vm08 ceph-mon[56824]: pgmap v272: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:47 vm00 ceph-mon[47668]: pgmap v272: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:49 vm08 ceph-mon[56824]: pgmap v273: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:49 vm00 ceph-mon[47668]: pgmap v273: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:50.905 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:50.906 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:50.931 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:50.932 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:51 vm08 ceph-mon[56824]: pgmap v274: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:51 vm00 ceph-mon[47668]: pgmap v274: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:53 vm08 ceph-mon[56824]: pgmap v275: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:53 vm00 ceph-mon[47668]: pgmap v275: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:55 vm08 ceph-mon[56824]: pgmap v276: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:55 vm00 ceph-mon[47668]: pgmap v276: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:55.933 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:12:55.933 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:12:55.961 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:12:55.962 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:12:56.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:56 vm08 ceph-mon[56824]: pgmap v277: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 
op/s 2026-03-08T23:12:56.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:56 vm00 ceph-mon[47668]: pgmap v277: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:12:58.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:12:58 vm08 ceph-mon[56824]: pgmap v278: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:12:58.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:12:58 vm00 ceph-mon[47668]: pgmap v278: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:00.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:00 vm08 ceph-mon[56824]: pgmap v279: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:00.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:00 vm00 ceph-mon[47668]: pgmap v279: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:00.963 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:00.964 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:01.025 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:01.026 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:02.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:02 vm08 ceph-mon[56824]: pgmap v280: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:02.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:02 vm00 ceph-mon[47668]: pgmap v280: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:04 vm08 ceph-mon[56824]: pgmap v281: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:04.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:04 vm00 ceph-mon[47668]: pgmap v281: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:13:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:13:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:13:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:13:06.028 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:06.028 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:06.056 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:06.056 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:06 vm08 ceph-mon[56824]: pgmap v282: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:06 vm00 ceph-mon[47668]: pgmap v282: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:08.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:08 vm08 ceph-mon[56824]: pgmap v283: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:08.885 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:08 vm00 ceph-mon[47668]: pgmap v283: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:11.058 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:11.058 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:11.084 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:11.084 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:11.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:10 vm08 ceph-mon[56824]: pgmap v284: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:10 vm00 ceph-mon[47668]: pgmap v284: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:12 vm00 ceph-mon[47668]: pgmap v285: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:12 vm08 ceph-mon[56824]: pgmap v285: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:13:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:13:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:13:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:13:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
2026-03-08T23:13:14.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:13:14.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:13:14.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:13:14.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:13:14.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:13:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:14 vm08 ceph-mon[56824]: pgmap v286: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:14 vm00 ceph-mon[47668]: pgmap v286: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:16.086 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:16.087 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:16.114 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:16.114 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:17.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:16 vm08 ceph-mon[56824]: pgmap v287: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:16 vm00 ceph-mon[47668]: pgmap v287: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:18.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:18 vm00 ceph-mon[47668]: pgmap v288: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:18 vm08 ceph-mon[56824]: pgmap v288: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:21.116 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:21.116 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:20 vm08 ceph-mon[56824]: pgmap v289: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:21.143 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:21.143 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:20 vm00 ceph-mon[47668]: pgmap v289: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s 
rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:22 vm08 ceph-mon[56824]: pgmap v290: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:23.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:22 vm00 ceph-mon[47668]: pgmap v290: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:24 vm00 ceph-mon[47668]: pgmap v291: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:25.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:24 vm08 ceph-mon[56824]: pgmap v291: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:26.145 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:26.146 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:26.172 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:26.172 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:27.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:26 vm00 ceph-mon[47668]: pgmap v292: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:27.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:26 vm08 ceph-mon[56824]: pgmap v292: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:28.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:28 vm00 ceph-mon[47668]: pgmap v293: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:28 vm08 ceph-mon[56824]: pgmap v293: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:31.173 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:31.174 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:31.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:30 vm00 ceph-mon[47668]: pgmap v294: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:31.202 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:31.203 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:30 vm08 ceph-mon[56824]: pgmap v294: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:33 vm08 ceph-mon[56824]: pgmap v295: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:33.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:33 vm00 ceph-mon[47668]: pgmap v295: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:35 vm08 ceph-mon[56824]: pgmap 
v296: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:35 vm00 ceph-mon[47668]: pgmap v296: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:36.205 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:36.205 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:36.236 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:36.237 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:37 vm08 ceph-mon[56824]: pgmap v297: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:37 vm00 ceph-mon[47668]: pgmap v297: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:38.154 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:38 vm08 ceph-mon[56824]: pgmap v298: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:38.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:38 vm00 ceph-mon[47668]: pgmap v298: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:41.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:40 vm08 ceph-mon[56824]: pgmap v299: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:40 vm00 ceph-mon[47668]: pgmap v299: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:41.238 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:41.239 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:41.266 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:41.267 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:43.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:42 vm00 ceph-mon[47668]: pgmap v300: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:42 vm08 ceph-mon[56824]: pgmap v300: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:44 vm08 ceph-mon[56824]: pgmap v301: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:44 vm00 ceph-mon[47668]: pgmap v301: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:46.269 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:46.269 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:46.311 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:46.312 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:47.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:46 vm08 ceph-mon[56824]: pgmap v302: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:46 vm00 ceph-mon[47668]: pgmap v302: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:48.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:48 vm08 ceph-mon[56824]: pgmap v303: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:48.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:48 vm00 ceph-mon[47668]: pgmap v303: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:51.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:50 vm08 ceph-mon[56824]: pgmap v304: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:50 vm00 ceph-mon[47668]: pgmap v304: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:51.314 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:51.314 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:51.339 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:51.339 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:52 vm00 ceph-mon[47668]: pgmap v305: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:52 vm08 ceph-mon[56824]: pgmap v305: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:54 vm08 ceph-mon[56824]: pgmap v306: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:54 vm00 ceph-mon[47668]: pgmap v306: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:56.340 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:13:56.341 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:13:56.367 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:13:56.368 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:13:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:56 vm08 ceph-mon[56824]: pgmap v307: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:13:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:56 vm00 ceph-mon[47668]: pgmap v307: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 
255 B/s wr, 0 op/s 2026-03-08T23:13:58.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:13:58 vm00 ceph-mon[47668]: pgmap v308: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:13:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:13:58 vm08 ceph-mon[56824]: pgmap v308: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:00 vm08 ceph-mon[56824]: pgmap v309: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:00 vm00 ceph-mon[47668]: pgmap v309: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:01.369 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:01.370 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:01.396 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:01.396 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:02 vm08 ceph-mon[56824]: pgmap v310: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:03.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:02 vm00 ceph-mon[47668]: pgmap v310: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:05.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:04 vm00 ceph-mon[47668]: pgmap v311: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:04 vm08 ceph-mon[56824]: pgmap v311: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:14:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:14:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:14:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:14:06.398 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:06.398 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:06.435 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:06.436 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:06 vm00 ceph-mon[47668]: pgmap v312: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:06 vm08 ceph-mon[56824]: pgmap v312: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:08.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:08 vm00 ceph-mon[47668]: pgmap v313: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:08 vm08 ceph-mon[56824]: pgmap v313: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:10 vm08 ceph-mon[56824]: pgmap v314: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:11.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:10 vm00 ceph-mon[47668]: pgmap v314: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:11.437 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:11.438 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:11.463 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:11.464 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:12 vm08 ceph-mon[56824]: pgmap v315: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:13.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:12 vm00 ceph-mon[47668]: pgmap v315: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:14.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:14:14.135 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:14:14.136 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:14:14.136 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:13 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:14:14.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:14:14.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:13 vm08 ceph-mon[56824]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:14:14.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:14:14.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:13 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:14:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:14 vm08 ceph-mon[56824]: pgmap v316: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:14 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:14:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:14 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:14:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:14 vm00 ceph-mon[47668]: pgmap v316: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:14:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:14:16.465 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:16.466 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:16.494 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:16.495 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:17 vm08 ceph-mon[56824]: pgmap v317: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:17 vm00 ceph-mon[47668]: pgmap v317: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:19 vm08 ceph-mon[56824]: pgmap v318: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:19.407 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:19 vm00 ceph-mon[47668]: pgmap v318: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:21 vm08 ceph-mon[56824]: pgmap v319: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:21 vm00 ceph-mon[47668]: pgmap v319: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:21.496 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:21.496 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake 
/mnt/foo -o sync 2026-03-08T23:14:21.522 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:21.523 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:23 vm08 ceph-mon[56824]: pgmap v320: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:23 vm00 ceph-mon[47668]: pgmap v320: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:25.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:25 vm08 ceph-mon[56824]: pgmap v321: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:25 vm00 ceph-mon[47668]: pgmap v321: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:26.524 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:26.547 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:26.737 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:26.738 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:27 vm08 ceph-mon[56824]: pgmap v322: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:27 vm00 ceph-mon[47668]: pgmap v322: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:29 vm08 ceph-mon[56824]: pgmap v323: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:29 vm00 ceph-mon[47668]: pgmap v323: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:31 vm08 ceph-mon[56824]: pgmap v324: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:31 vm00 ceph-mon[47668]: pgmap v324: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:31.739 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:31.740 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:31.765 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:31.765 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:33 vm08 ceph-mon[56824]: pgmap v325: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:33 vm00 ceph-mon[47668]: pgmap v325: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB 
used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:35 vm08 ceph-mon[56824]: pgmap v326: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:35 vm00 ceph-mon[47668]: pgmap v326: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:36.766 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:36.767 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:36.792 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:36.792 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:37 vm08 ceph-mon[56824]: pgmap v327: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:37 vm00 ceph-mon[47668]: pgmap v327: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:38.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:38 vm08 ceph-mon[56824]: pgmap v328: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:38.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:38 vm00 ceph-mon[47668]: pgmap v328: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:40 vm08 ceph-mon[56824]: pgmap v329: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:40 vm00 ceph-mon[47668]: pgmap v329: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:41.794 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:41.794 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:41.819 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:41.820 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:43.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:42 vm00 ceph-mon[47668]: pgmap v330: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:42 vm08 ceph-mon[56824]: pgmap v330: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:45.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:44 vm08 ceph-mon[56824]: pgmap v331: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:44 vm00 ceph-mon[47668]: pgmap v331: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:46.821 INFO:teuthology.orchestra.run.vm00.stderr:++ 
hostname 2026-03-08T23:14:46.822 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:46.848 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:46.849 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:47.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:46 vm08 ceph-mon[56824]: pgmap v332: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:46 vm00 ceph-mon[47668]: pgmap v332: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:48.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:48 vm00 ceph-mon[47668]: pgmap v333: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:48 vm08 ceph-mon[56824]: pgmap v333: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:50 vm08 ceph-mon[56824]: pgmap v334: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:50 vm00 ceph-mon[47668]: pgmap v334: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:51.850 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:51.851 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:51.875 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:51.875 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:53.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:52 vm00 ceph-mon[47668]: pgmap v335: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:52 vm08 ceph-mon[56824]: pgmap v335: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:54 vm08 ceph-mon[56824]: pgmap v336: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:54 vm00 ceph-mon[47668]: pgmap v336: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:56.877 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:14:56.877 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:14:56.910 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:14:56.910 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:14:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:56 vm08 ceph-mon[56824]: pgmap v337: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:57.179 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:56 vm00 ceph-mon[47668]: pgmap v337: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:14:58.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:14:58 vm00 ceph-mon[47668]: pgmap v338: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:14:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:14:58 vm08 ceph-mon[56824]: pgmap v338: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:00 vm08 ceph-mon[56824]: pgmap v339: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:00 vm00 ceph-mon[47668]: pgmap v339: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:01.912 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:01.912 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:01.938 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:01.939 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:03.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:02 vm00 ceph-mon[47668]: pgmap v340: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:02 vm08 ceph-mon[56824]: pgmap v340: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:04 vm08 ceph-mon[56824]: pgmap v341: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:05.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:04 vm00 ceph-mon[47668]: pgmap v341: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:15:06.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:15:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:15:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:15:06.941 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:06.941 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:06.967 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:06.968 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:06 vm08 ceph-mon[56824]: pgmap v342: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:06 vm00 ceph-mon[47668]: pgmap v342: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:08.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:08 vm00 ceph-mon[47668]: pgmap v343: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:08 vm08 ceph-mon[56824]: pgmap v343: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:10 vm00 ceph-mon[47668]: pgmap v344: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:10 vm08 ceph-mon[56824]: pgmap v344: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:11.969 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:11.969 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:11.995 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:11.995 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:12 vm08 ceph-mon[56824]: pgmap v345: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:12 vm00 ceph-mon[47668]: pgmap v345: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:14 vm08 ceph-mon[56824]: pgmap v346: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:14 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:15:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:14 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:15:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:14 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:15:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:14 vm08 ceph-mon[56824]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:15:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:14 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:15:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:14 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:15:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:14 vm00 ceph-mon[47668]: pgmap v346: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:15.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:15:15.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:15:15.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:15:15.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:15:15.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:15:15.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:14 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:15:16.996 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:16.997 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:17.026 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:17.027 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:17 vm08 ceph-mon[56824]: pgmap v347: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:17 vm00 ceph-mon[47668]: pgmap v347: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:19 vm08 ceph-mon[56824]: pgmap v348: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:19 vm00 ceph-mon[47668]: pgmap v348: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:20.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:20 vm00 ceph-mon[47668]: pgmap v349: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:20.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:20 vm08 ceph-mon[56824]: pgmap v349: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s 
wr, 0 op/s 2026-03-08T23:15:22.028 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:22.029 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:22.145 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:22.147 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:22 vm08 ceph-mon[56824]: pgmap v350: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:22.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:22 vm00 ceph-mon[47668]: pgmap v350: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:24 vm00 ceph-mon[47668]: pgmap v351: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:24 vm08 ceph-mon[56824]: pgmap v351: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:27.147 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:27.147 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:27.174 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:27.174 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:26 vm00 ceph-mon[47668]: pgmap v352: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:26 vm08 ceph-mon[56824]: pgmap v352: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:28.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:28 vm00 ceph-mon[47668]: pgmap v353: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:28 vm08 ceph-mon[56824]: pgmap v353: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:30 vm00 ceph-mon[47668]: pgmap v354: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:30 vm08 ceph-mon[56824]: pgmap v354: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:32.176 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:32.176 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:32.202 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:32.203 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:33.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:32 vm00 ceph-mon[47668]: pgmap v355: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 
B/s wr, 0 op/s 2026-03-08T23:15:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:32 vm08 ceph-mon[56824]: pgmap v355: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:34 vm00 ceph-mon[47668]: pgmap v356: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:34 vm08 ceph-mon[56824]: pgmap v356: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:37.204 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:37.205 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:37.236 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:37.237 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:36 vm08 ceph-mon[56824]: pgmap v357: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:36 vm00 ceph-mon[47668]: pgmap v357: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:39.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:39 vm08 ceph-mon[56824]: pgmap v358: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:39 vm00 ceph-mon[47668]: pgmap v358: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:40.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:40 vm08 ceph-mon[56824]: pgmap v359: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:40.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:40 vm00 ceph-mon[47668]: pgmap v359: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:42.238 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:42.239 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:42.270 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:42.271 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:42.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:42 vm00 ceph-mon[47668]: pgmap v360: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:42 vm08 ceph-mon[56824]: pgmap v360: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:45.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:45 vm00 ceph-mon[47668]: pgmap v361: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:45 vm08 ceph-mon[56824]: pgmap v361: 97 
pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:46 vm08 ceph-mon[56824]: pgmap v362: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:46 vm00 ceph-mon[47668]: pgmap v362: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:47.273 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:47.274 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:47.364 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:47.365 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:48.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:48 vm00 ceph-mon[47668]: pgmap v363: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:48 vm08 ceph-mon[56824]: pgmap v363: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:51.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:51 vm08 ceph-mon[56824]: pgmap v364: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:51 vm00 ceph-mon[47668]: pgmap v364: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:52.367 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:52.367 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:52.404 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:52.404 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:52.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:52 vm08 ceph-mon[56824]: pgmap v365: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:52.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:52 vm00 ceph-mon[47668]: pgmap v365: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:54 vm08 ceph-mon[56824]: pgmap v366: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:55.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:54 vm00 ceph-mon[47668]: pgmap v366: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:57.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:56 vm08 ceph-mon[56824]: pgmap v367: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:57.405 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:15:57.406 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:15:57.429 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:56 vm00 ceph-mon[47668]: pgmap v367: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:15:57.433 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:15:57.434 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:15:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:15:58 vm08 ceph-mon[56824]: pgmap v368: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:15:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:15:58 vm00 ceph-mon[47668]: pgmap v368: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:01.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:01 vm08 ceph-mon[56824]: pgmap v369: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:01.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:01 vm00 ceph-mon[47668]: pgmap v369: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:02.435 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:02.436 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:02.461 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:02.462 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:03 vm08 ceph-mon[56824]: pgmap v370: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:03 vm00 ceph-mon[47668]: pgmap v370: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:05 vm08 ceph-mon[56824]: pgmap v371: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:05 vm00 ceph-mon[47668]: pgmap v371: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:16:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:16:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:16:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:16:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:07 vm08 ceph-mon[56824]: pgmap v372: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:07 vm00 ceph-mon[47668]: pgmap v372: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:07.463 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:07.464 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:07.496 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:07.497 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:09 vm08 ceph-mon[56824]: pgmap v373: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:09 vm00 ceph-mon[47668]: pgmap v373: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:11 vm08 ceph-mon[56824]: pgmap v374: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:11 vm00 ceph-mon[47668]: pgmap v374: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:12.498 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:12.499 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:12.525 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:12.526 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:13 vm08 ceph-mon[56824]: pgmap v375: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:13 vm00 ceph-mon[47668]: pgmap v375: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:15 vm08 ceph-mon[56824]: pgmap v376: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:15 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:16:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:15 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:16:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:15 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", 
"entity": "client.admin"}]: dispatch 2026-03-08T23:16:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:15 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:16:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:15 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:16:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:15 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:16:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:15 vm00 ceph-mon[47668]: pgmap v376: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:16:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:16:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:16:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:16:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:16:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:16:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:17 vm08 ceph-mon[56824]: pgmap v377: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:17 vm00 ceph-mon[47668]: pgmap v377: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:17.527 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:17.528 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:17.554 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:17.555 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:19 vm08 ceph-mon[56824]: pgmap v378: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:19.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:19 vm00 ceph-mon[47668]: pgmap v378: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:20.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:20 vm00 
ceph-mon[47668]: pgmap v379: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:20.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:20 vm08 ceph-mon[56824]: pgmap v379: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:22.557 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:22.557 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:22.583 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:22.583 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:22 vm08 ceph-mon[56824]: pgmap v380: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:22.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:22 vm00 ceph-mon[47668]: pgmap v380: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:24 vm08 ceph-mon[56824]: pgmap v381: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:24 vm00 ceph-mon[47668]: pgmap v381: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:27 vm08 ceph-mon[56824]: pgmap v382: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:27 vm00 ceph-mon[47668]: pgmap v382: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:27.585 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:27.585 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:27.611 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:27.611 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:29 vm08 ceph-mon[56824]: pgmap v383: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:29 vm00 ceph-mon[47668]: pgmap v383: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:31 vm08 ceph-mon[56824]: pgmap v384: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:31 vm00 ceph-mon[47668]: pgmap v384: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:32.613 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:32.613 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:32.639 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:32.640 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:33 vm08 ceph-mon[56824]: pgmap v385: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:33 vm00 ceph-mon[47668]: pgmap v385: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:35 vm08 ceph-mon[56824]: pgmap v386: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:35 vm00 ceph-mon[47668]: pgmap v386: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:37 vm08 ceph-mon[56824]: pgmap v387: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:37.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:37 vm00 ceph-mon[47668]: pgmap v387: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:37.641 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:37.642 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:37.670 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:37.671 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:39 vm08 ceph-mon[56824]: pgmap v388: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:39.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:39 vm00 ceph-mon[47668]: pgmap v388: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:41 vm08 ceph-mon[56824]: pgmap v389: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:41.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:41 vm00 ceph-mon[47668]: pgmap v389: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:42.672 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:42.673 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:42.699 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:42.699 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:43 vm08 ceph-mon[56824]: pgmap v390: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:43.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:43 vm00 ceph-mon[47668]: pgmap v390: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 
255 B/s wr, 0 op/s 2026-03-08T23:16:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:45 vm08 ceph-mon[56824]: pgmap v391: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:45.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:45 vm00 ceph-mon[47668]: pgmap v391: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:47 vm08 ceph-mon[56824]: pgmap v392: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:47 vm00 ceph-mon[47668]: pgmap v392: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:47.701 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:47.701 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:47.728 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:47.729 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:49 vm08 ceph-mon[56824]: pgmap v393: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:49.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:49 vm00 ceph-mon[47668]: pgmap v393: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:51 vm08 ceph-mon[56824]: pgmap v394: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:51.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:51 vm00 ceph-mon[47668]: pgmap v394: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:52.730 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:52.730 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:52.783 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:52.784 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:53 vm08 ceph-mon[56824]: pgmap v395: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:53.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:53 vm00 ceph-mon[47668]: pgmap v395: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:55 vm08 ceph-mon[56824]: pgmap v396: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:55.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:55 vm00 ceph-mon[47668]: pgmap v396: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:57.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:57 vm08 ceph-mon[56824]: pgmap v397: 
97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:57.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:57 vm00 ceph-mon[47668]: pgmap v397: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:16:57.785 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:16:57.786 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:16:57.812 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:16:57.812 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:16:58.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:16:58 vm08 ceph-mon[56824]: pgmap v398: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:16:58.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:16:58 vm00 ceph-mon[47668]: pgmap v398: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:01 vm08 ceph-mon[56824]: pgmap v399: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:01.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:01 vm00 ceph-mon[47668]: pgmap v399: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:02.814 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:02.814 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:02.841 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:02.841 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:03 vm08 ceph-mon[56824]: pgmap v400: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:03 vm00 ceph-mon[47668]: pgmap v400: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:05 vm08 ceph-mon[56824]: pgmap v401: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:05.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:05 vm00 ceph-mon[47668]: pgmap v401: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:17:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:17:06.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:06 vm00 ceph-mon[47668]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:17:06.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:17:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:07 vm08 ceph-mon[56824]: pgmap v402: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:07 vm00 ceph-mon[47668]: pgmap v402: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:07.843 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:07.843 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:07.869 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:07.869 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:09 vm08 ceph-mon[56824]: pgmap v403: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:09 vm00 ceph-mon[47668]: pgmap v403: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:11 vm08 ceph-mon[56824]: pgmap v404: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:11.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:11 vm00 ceph-mon[47668]: pgmap v404: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:12.870 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:12.871 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:12.904 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:12.904 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:13 vm08 ceph-mon[56824]: pgmap v405: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:13 vm00 ceph-mon[47668]: pgmap v405: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:15.165 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:15 vm08 ceph-mon[56824]: pgmap v406: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:15.165 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:15 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:17:15.165 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:15 vm08 ceph-mon[56824]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:17:15.165 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:15 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:17:15.176 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:15 vm00 ceph-mon[47668]: pgmap v406: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:15.176 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:17:15.176 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:17:15.176 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:15 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:17:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:16 vm00 ceph-mon[47668]: pgmap v407: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:16 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:17:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:16 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:17:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:16 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:17:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:16 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:17:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:16 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:17:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:16 vm08 ceph-mon[56824]: pgmap v407: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:16 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:17:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:16 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:17:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:16 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:17:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:16 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:17:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 
23:17:16 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:17:17.906 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:17.906 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:17.932 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:17.933 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:18 vm00 ceph-mon[47668]: pgmap v408: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:18 vm08 ceph-mon[56824]: pgmap v408: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:21 vm08 ceph-mon[56824]: pgmap v409: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:21 vm00 ceph-mon[47668]: pgmap v409: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:22.934 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:22.935 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:22.961 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:22.961 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:23 vm08 ceph-mon[56824]: pgmap v410: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:23 vm00 ceph-mon[47668]: pgmap v410: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:25.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:25 vm08 ceph-mon[56824]: pgmap v411: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:25.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:25 vm00 ceph-mon[47668]: pgmap v411: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:26.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:26 vm00 ceph-mon[47668]: pgmap v412: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:26.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:26 vm08 ceph-mon[56824]: pgmap v412: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:27.962 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:27.963 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:27.990 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:27.991 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:28.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:28 vm00 ceph-mon[47668]: pgmap v413: 97 
pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:28 vm08 ceph-mon[56824]: pgmap v413: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:31 vm08 ceph-mon[56824]: pgmap v414: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:31.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:31 vm00 ceph-mon[47668]: pgmap v414: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:32.993 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:32.993 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:33.020 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:33.021 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:33 vm08 ceph-mon[56824]: pgmap v415: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:33 vm00 ceph-mon[47668]: pgmap v415: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:35 vm08 ceph-mon[56824]: pgmap v416: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:35 vm00 ceph-mon[47668]: pgmap v416: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:37.143 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:37 vm08 ceph-mon[56824]: pgmap v417: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:37.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:37 vm00 ceph-mon[47668]: pgmap v417: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:38.022 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:38.023 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:38.048 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:38.049 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:39.343 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:39 vm00 ceph-mon[47668]: pgmap v418: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:39.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:39 vm08 ceph-mon[56824]: pgmap v418: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:41.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:41 vm08 ceph-mon[56824]: pgmap v419: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:41.430 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:41 vm00 ceph-mon[47668]: pgmap v419: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:43.051 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:43.052 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:43.078 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:43.078 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:43.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:43 vm08 ceph-mon[56824]: pgmap v420: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:43.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:43 vm00 ceph-mon[47668]: pgmap v420: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:45 vm08 ceph-mon[56824]: pgmap v421: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:45.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:45 vm00 ceph-mon[47668]: pgmap v421: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:47 vm08 ceph-mon[56824]: pgmap v422: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:47 vm00 ceph-mon[47668]: pgmap v422: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:48.080 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:48.081 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:48.107 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:48.107 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:49 vm08 ceph-mon[56824]: pgmap v423: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:49 vm00 ceph-mon[47668]: pgmap v423: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:50.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:50 vm00 ceph-mon[47668]: pgmap v424: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:50.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:50 vm08 ceph-mon[56824]: pgmap v424: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:52.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:52 vm00 ceph-mon[47668]: pgmap v425: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:53.109 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:53.110 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs 
vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:53.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:52 vm08 ceph-mon[56824]: pgmap v425: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:53.138 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:53.139 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:55 vm08 ceph-mon[56824]: pgmap v426: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:55.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:55 vm00 ceph-mon[47668]: pgmap v426: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:57.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:57 vm08 ceph-mon[56824]: pgmap v427: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:57.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:57 vm00 ceph-mon[47668]: pgmap v427: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:17:58.140 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:17:58.141 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:17:58.166 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:17:58.167 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:17:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:17:59 vm08 ceph-mon[56824]: pgmap v428: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:17:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:17:59 vm00 ceph-mon[47668]: pgmap v428: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:01 vm08 ceph-mon[56824]: pgmap v429: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:01.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:01 vm00 ceph-mon[47668]: pgmap v429: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:03.168 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:03.169 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:03.194 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:03.195 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:03 vm08 ceph-mon[56824]: pgmap v430: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:03 vm00 ceph-mon[47668]: pgmap v430: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:05.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:05 vm08 ceph-mon[56824]: pgmap v431: 97 pgs: 97 active+clean; 453 KiB data, 
53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:05.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:05 vm00 ceph-mon[47668]: pgmap v431: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:18:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:18:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:18:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:18:07.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:07 vm08 ceph-mon[56824]: pgmap v432: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:07 vm00 ceph-mon[47668]: pgmap v432: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:08.196 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:08.197 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:08.230 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:08.230 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:08.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:08 vm08 ceph-mon[56824]: pgmap v433: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:08.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:08 vm00 ceph-mon[47668]: pgmap v433: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:11.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:11 vm08 ceph-mon[56824]: pgmap v434: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:11.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:11 vm00 ceph-mon[47668]: pgmap v434: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:13.232 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:13.233 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:13.260 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:13.260 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:13.378 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:13 vm08 ceph-mon[56824]: pgmap v435: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:13.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:13 vm00 ceph-mon[47668]: pgmap v435: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:15 vm08 ceph-mon[56824]: pgmap v436: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:15 vm00 ceph-mon[47668]: pgmap v436: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:16.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:16 vm00 ceph-mon[47668]: pgmap v437: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:16.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:16 vm08 ceph-mon[56824]: pgmap v437: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:17.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:18:17.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:18:17.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:18:17.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:18:17.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:18:17.629 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:18:17.629 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:18:17.629 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:18:17.629 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:18:17.629 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:18:18.262 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:18.263 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:18.288 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:18.288 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:18.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:18 vm00 ceph-mon[47668]: pgmap v438: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:18.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:18 vm08 ceph-mon[56824]: pgmap v438: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:21 vm08 ceph-mon[56824]: pgmap v439: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:21.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:21 vm00 ceph-mon[47668]: pgmap v439: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:23.290 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:23.290 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:23.317 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:23.317 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:23 vm08 ceph-mon[56824]: pgmap v440: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:23 vm00 ceph-mon[47668]: pgmap v440: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:25.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:25 vm08 ceph-mon[56824]: pgmap v441: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:25 vm00 ceph-mon[47668]: pgmap v441: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:27 vm08 ceph-mon[56824]: pgmap v442: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:27 vm00 ceph-mon[47668]: pgmap v442: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:28.319 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:28.319 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:28.347 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:28.348 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:29 vm08 ceph-mon[56824]: pgmap v443: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:29 vm00 
ceph-mon[47668]: pgmap v443: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:31 vm08 ceph-mon[56824]: pgmap v444: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:31 vm00 ceph-mon[47668]: pgmap v444: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:32.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:32 vm00 ceph-mon[47668]: pgmap v445: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:32.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:32 vm08 ceph-mon[56824]: pgmap v445: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:33.349 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:33.350 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:33.376 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:33.377 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:35 vm08 ceph-mon[56824]: pgmap v446: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:35 vm00 ceph-mon[47668]: pgmap v446: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:37.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:37 vm08 ceph-mon[56824]: pgmap v447: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:37.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:37 vm00 ceph-mon[47668]: pgmap v447: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:38.378 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:38.379 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:38.405 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:38.405 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:39.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:39 vm08 ceph-mon[56824]: pgmap v448: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:39.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:39 vm00 ceph-mon[47668]: pgmap v448: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:41.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:41 vm08 ceph-mon[56824]: pgmap v449: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:41.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:41 vm00 ceph-mon[47668]: pgmap v449: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
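(Editorial annotation, not part of the captured log: the repeating `++ hostname`, `+ mount -t nfs vm00:/fake /mnt/foo -o sync`, `mount.nfs: mount system call failed`, `+ sleep 5` lines throughout this stretch are the set -x trace of a shell retry loop running on vm00. A minimal sketch of that loop, reconstructed only from what the trace itself shows (the /fake export, the /mnt/foo mountpoint, the -o sync option, and a 5-second retry interval), is:

    # retry the NFS mount of the /fake export against this host until it succeeds
    while ! mount -t nfs $(hostname):/fake /mnt/foo -o sync; do
        sleep 5
    done

At this point in the log every iteration still reports "mount.nfs: mount system call failed", so the loop has not yet exited.)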
2026-03-08T23:18:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:43 vm08 ceph-mon[56824]: pgmap v450: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:43.406 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:43.407 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:43 vm00 ceph-mon[47668]: pgmap v450: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:43.433 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:43.433 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:45 vm08 ceph-mon[56824]: pgmap v451: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:45 vm00 ceph-mon[47668]: pgmap v451: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:47.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:47 vm08 ceph-mon[56824]: pgmap v452: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:47.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:47 vm00 ceph-mon[47668]: pgmap v452: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:48.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:48 vm00 ceph-mon[47668]: pgmap v453: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:48.435 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:48.435 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:48.465 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:48.465 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:48.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:48 vm08 ceph-mon[56824]: pgmap v453: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:51 vm00 ceph-mon[47668]: pgmap v454: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:51 vm08 ceph-mon[56824]: pgmap v454: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:52.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:52 vm00 ceph-mon[47668]: pgmap v455: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:52.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:52 vm08 ceph-mon[56824]: pgmap v455: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:53.466 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:53.466 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:53.492 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:53.492 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:55 vm08 ceph-mon[56824]: pgmap v456: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:55 vm00 ceph-mon[47668]: pgmap v456: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:57.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:57 vm08 ceph-mon[56824]: pgmap v457: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:57.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:57 vm00 ceph-mon[47668]: pgmap v457: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:18:58.494 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:18:58.494 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:18:58.519 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:18:58.520 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:18:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:18:59 vm08 ceph-mon[56824]: pgmap v458: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:18:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:18:59 vm00 ceph-mon[47668]: pgmap v458: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:01 vm08 ceph-mon[56824]: pgmap v459: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:01 vm00 ceph-mon[47668]: pgmap v459: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:03 vm08 ceph-mon[56824]: pgmap v460: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:03 vm00 ceph-mon[47668]: pgmap v460: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:03.521 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:03.522 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:03.547 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:03.547 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:05 vm08 ceph-mon[56824]: pgmap v461: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:05 vm00 
ceph-mon[47668]: pgmap v461: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:19:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:06 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:19:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:19:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:06 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:19:07.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:07 vm08 ceph-mon[56824]: pgmap v462: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:07.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:07 vm00 ceph-mon[47668]: pgmap v462: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:08.549 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:08.549 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:08.575 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:08.575 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:09 vm08 ceph-mon[56824]: pgmap v463: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:09 vm00 ceph-mon[47668]: pgmap v463: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:10.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:10 vm00 ceph-mon[47668]: pgmap v464: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:10.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:10 vm08 ceph-mon[56824]: pgmap v464: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:12.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:12 vm00 ceph-mon[47668]: pgmap v465: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:12 vm08 ceph-mon[56824]: pgmap v465: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:13.577 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:13.577 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:13.603 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:13.603 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:15 vm08 ceph-mon[56824]: pgmap v466: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:15 vm00 ceph-mon[47668]: pgmap v466: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:17 vm00 ceph-mon[47668]: pgmap v467: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:19:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:19:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:19:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:17 vm08 ceph-mon[56824]: pgmap v467: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:19:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:19:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:19:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:19:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:19:18.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:18 vm00 ceph-mon[47668]: pgmap v468: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:18.605 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:18.606 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:18.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:19:18.628 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:19:18.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:18 vm08 ceph-mon[56824]: pgmap v468: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:18.710 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:18.711 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:20.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:20 vm08 ceph-mon[56824]: pgmap v469: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:20.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:20 vm00 ceph-mon[47668]: pgmap v469: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:23.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:22 vm00 ceph-mon[47668]: pgmap v470: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:22 vm08 ceph-mon[56824]: pgmap v470: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:23.713 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:23.713 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:23.740 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:23.740 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:25.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:24 vm08 ceph-mon[56824]: pgmap v471: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:24 vm00 ceph-mon[47668]: pgmap v471: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:27.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:26 vm08 ceph-mon[56824]: pgmap v472: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:26 vm00 ceph-mon[47668]: pgmap v472: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:28.742 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:28.742 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:28.768 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:28.769 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:28 vm08 ceph-mon[56824]: pgmap v473: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:28 vm00 ceph-mon[47668]: pgmap v473: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:30.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:30 vm08 ceph-mon[56824]: pgmap v474: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:30 vm00 ceph-mon[47668]: pgmap v474: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:32 vm00 ceph-mon[47668]: pgmap v475: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:32 vm08 ceph-mon[56824]: pgmap v475: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:33.770 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:33.771 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:33.797 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:33.798 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:34.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:34 vm00 ceph-mon[47668]: pgmap v476: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:34 vm08 ceph-mon[56824]: pgmap v476: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:36 vm08 ceph-mon[56824]: pgmap v477: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:36 vm00 ceph-mon[47668]: pgmap v477: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:38.799 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:38.799 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:38.841 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:38.842 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:38.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:38 vm00 ceph-mon[47668]: pgmap v478: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:38 vm08 ceph-mon[56824]: pgmap v478: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:40.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:40 vm08 ceph-mon[56824]: pgmap v479: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:40.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:40 vm00 ceph-mon[47668]: pgmap v479: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:42.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:42 vm00 ceph-mon[47668]: pgmap v480: 97 pgs: 97 active+clean; 453 KiB data, 53 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:43.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:42 vm08 ceph-mon[56824]: pgmap v480: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:43.843 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:43.844 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:43.895 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:43.895 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:44.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:44 vm00 ceph-mon[47668]: pgmap v481: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:45.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:44 vm08 ceph-mon[56824]: pgmap v481: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:46.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:46 vm08 ceph-mon[56824]: pgmap v482: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:46 vm00 ceph-mon[47668]: pgmap v482: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:48.896 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:48.897 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:48.922 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:48.923 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:48.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:48 vm00 ceph-mon[47668]: pgmap v483: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:48 vm08 ceph-mon[56824]: pgmap v483: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:50 vm08 ceph-mon[56824]: pgmap v484: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:50.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:50 vm00 ceph-mon[47668]: pgmap v484: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:52.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:52 vm00 ceph-mon[47668]: pgmap v485: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:52 vm08 ceph-mon[56824]: pgmap v485: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:53.924 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:53.924 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:53.949 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-08T23:19:53.950 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:54 vm00 ceph-mon[47668]: pgmap v486: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:54 vm08 ceph-mon[56824]: pgmap v486: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:56 vm08 ceph-mon[56824]: pgmap v487: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:56 vm00 ceph-mon[47668]: pgmap v487: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:19:58.951 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:19:58.951 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:19:58.980 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:19:58.981 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:19:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:19:58 vm08 ceph-mon[56824]: pgmap v488: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:19:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:19:58 vm00 ceph-mon[47668]: pgmap v488: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:00.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:00 vm08 ceph-mon[56824]: pgmap v489: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:00.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:00 vm08 ceph-mon[56824]: overall HEALTH_OK 2026-03-08T23:20:00.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:00 vm00 ceph-mon[47668]: pgmap v489: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:00.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:00 vm00 ceph-mon[47668]: overall HEALTH_OK 2026-03-08T23:20:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:02 vm00 ceph-mon[47668]: pgmap v490: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:02 vm08 ceph-mon[56824]: pgmap v490: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:03.982 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:03.983 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:04.008 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:04.008 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:04.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:04 vm00 ceph-mon[47668]: pgmap v491: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:05.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:04 vm08 ceph-mon[56824]: pgmap v491: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:20:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:20:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:20:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:20:06.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:06 vm08 ceph-mon[56824]: pgmap v492: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:07.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:06 vm00 ceph-mon[47668]: pgmap v492: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:09.009 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:09.010 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:09.038 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:09.038 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:08 vm08 ceph-mon[56824]: pgmap v493: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:08 vm00 ceph-mon[47668]: pgmap v493: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:10.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:10 vm08 ceph-mon[56824]: pgmap v494: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:10.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:10 vm00 ceph-mon[47668]: pgmap v494: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:12.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:12 vm00 ceph-mon[47668]: pgmap v495: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:12 vm08 ceph-mon[56824]: pgmap v495: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:14.040 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:14.040 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:14.068 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:14.069 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:14.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:14 vm00 ceph-mon[47668]: pgmap v496: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:14 vm08 ceph-mon[56824]: pgmap v496: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:16 vm08 ceph-mon[56824]: pgmap v497: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:16.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:16 vm00 ceph-mon[47668]: pgmap v497: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:17.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:20:17.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:20:17.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:20:17.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:20:17.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:20:18.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:20:18.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:20:18.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:20:18.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:20:18.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:20:18.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:18 vm00 ceph-mon[47668]: pgmap v498: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 
B/s wr, 0 op/s 2026-03-08T23:20:19.070 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:19.071 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:19.096 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:19.096 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:18 vm08 ceph-mon[56824]: pgmap v498: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:20.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:20 vm08 ceph-mon[56824]: pgmap v499: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:20 vm00 ceph-mon[47668]: pgmap v499: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:22.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:22 vm00 ceph-mon[47668]: pgmap v500: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:22 vm08 ceph-mon[56824]: pgmap v500: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:24.097 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:24.098 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:24.125 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:24.125 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:24.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:24 vm00 ceph-mon[47668]: pgmap v501: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:24 vm08 ceph-mon[56824]: pgmap v501: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:26.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:26 vm08 ceph-mon[56824]: pgmap v502: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:26.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:26 vm00 ceph-mon[47668]: pgmap v502: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:28.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:28 vm00 ceph-mon[47668]: pgmap v503: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:29.127 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:28 vm08 ceph-mon[56824]: pgmap v503: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:29.127 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:29.153 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:29.153 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:30 vm08 ceph-mon[56824]: pgmap v504: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:30.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:30 vm00 ceph-mon[47668]: pgmap v504: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:32.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:32 vm00 ceph-mon[47668]: pgmap v505: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:32 vm08 ceph-mon[56824]: pgmap v505: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:34.154 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:34.155 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:34.180 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:34.181 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:34.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:34 vm00 ceph-mon[47668]: pgmap v506: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:34 vm08 ceph-mon[56824]: pgmap v506: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:36 vm08 ceph-mon[56824]: pgmap v507: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:36 vm00 ceph-mon[47668]: pgmap v507: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:39.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:38 vm08 ceph-mon[56824]: pgmap v508: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:39.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:38 vm00 ceph-mon[47668]: pgmap v508: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:39.182 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:39.182 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:39.208 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:39.208 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:40.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:40 vm08 ceph-mon[56824]: pgmap v509: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:40.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:40 vm00 ceph-mon[47668]: pgmap v509: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:42.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:42 
vm00 ceph-mon[47668]: pgmap v510: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:42 vm08 ceph-mon[56824]: pgmap v510: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:44.210 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:44.210 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:44.235 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:44.235 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:44.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:44 vm00 ceph-mon[47668]: pgmap v511: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:44 vm08 ceph-mon[56824]: pgmap v511: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:46 vm08 ceph-mon[56824]: pgmap v512: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:46 vm00 ceph-mon[47668]: pgmap v512: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:48 vm08 ceph-mon[56824]: pgmap v513: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:48 vm00 ceph-mon[47668]: pgmap v513: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:49.236 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:49.237 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:49.262 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:49.262 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:50 vm08 ceph-mon[56824]: pgmap v514: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:50.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:50 vm00 ceph-mon[47668]: pgmap v514: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:52.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:52 vm00 ceph-mon[47668]: pgmap v515: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:52 vm08 ceph-mon[56824]: pgmap v515: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:54.263 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:54.264 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:54.332 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:54.332 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:20:55.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:54 vm08 ceph-mon[56824]: pgmap v516: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:55.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:54 vm00 ceph-mon[47668]: pgmap v516: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:57.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:56 vm08 ceph-mon[56824]: pgmap v517: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:57.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:56 vm00 ceph-mon[47668]: pgmap v517: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:20:59.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:20:58 vm08 ceph-mon[56824]: pgmap v518: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:20:58 vm00 ceph-mon[47668]: pgmap v518: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:20:59.333 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:20:59.333 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:20:59.360 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:20:59.360 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:00.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:00 vm08 ceph-mon[56824]: pgmap v519: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:00.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:00 vm00 ceph-mon[47668]: pgmap v519: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:02.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:02 vm00 ceph-mon[47668]: pgmap v520: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:02 vm08 ceph-mon[56824]: pgmap v520: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:04.362 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:04.362 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:04.387 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:04.388 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:04.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:04 vm00 ceph-mon[47668]: pgmap v521: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:04 vm08 ceph-mon[56824]: pgmap v521: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 
170 B/s wr, 0 op/s 2026-03-08T23:21:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:21:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:21:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:21:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:21:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:06 vm08 ceph-mon[56824]: pgmap v522: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:06.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:06 vm00 ceph-mon[47668]: pgmap v522: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:08 vm08 ceph-mon[56824]: pgmap v523: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:08 vm00 ceph-mon[47668]: pgmap v523: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:09.389 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:09.390 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:09.415 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:09.415 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:10.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:10 vm08 ceph-mon[56824]: pgmap v524: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:10.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:10 vm00 ceph-mon[47668]: pgmap v524: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:12.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:12 vm00 ceph-mon[47668]: pgmap v525: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:13.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:12 vm08 ceph-mon[56824]: pgmap v525: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:14.417 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:14.417 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:14.443 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:14.443 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:14.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:14 vm00 ceph-mon[47668]: pgmap v526: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:14 vm08 ceph-mon[56824]: pgmap v526: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:16 vm08 ceph-mon[56824]: pgmap v527: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:16.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:16 vm00 ceph-mon[47668]: pgmap v527: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:21:17.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:21:17.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:21:17.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:21:17.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:21:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:21:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:21:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:21:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:21:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:21:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:18 vm08 ceph-mon[56824]: pgmap v528: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:18 vm00 ceph-mon[47668]: pgmap v528: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 
GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:19.445 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:19.445 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:19.473 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:19.473 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:20.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:20 vm08 ceph-mon[56824]: pgmap v529: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:20.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:20 vm00 ceph-mon[47668]: pgmap v529: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:22.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:22 vm00 ceph-mon[47668]: pgmap v530: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:22 vm08 ceph-mon[56824]: pgmap v530: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:24.475 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:24.475 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:24.501 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:24.502 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:24.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:24 vm00 ceph-mon[47668]: pgmap v531: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:24 vm08 ceph-mon[56824]: pgmap v531: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:26.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:26 vm08 ceph-mon[56824]: pgmap v532: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:26.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:26 vm00 ceph-mon[47668]: pgmap v532: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:28 vm08 ceph-mon[56824]: pgmap v533: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:28 vm00 ceph-mon[47668]: pgmap v533: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:29.503 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:29.504 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:29.530 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:29.531 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:30 vm08 ceph-mon[56824]: pgmap v534: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 
160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:30.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:30 vm00 ceph-mon[47668]: pgmap v534: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:32 vm00 ceph-mon[47668]: pgmap v535: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:32 vm08 ceph-mon[56824]: pgmap v535: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:34.532 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:34.533 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:34.578 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:34.578 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:34 vm08 ceph-mon[56824]: pgmap v536: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:35.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:34 vm00 ceph-mon[47668]: pgmap v536: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:37.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:36 vm08 ceph-mon[56824]: pgmap v537: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:36 vm00 ceph-mon[47668]: pgmap v537: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:39.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:38 vm08 ceph-mon[56824]: pgmap v538: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:38 vm00 ceph-mon[47668]: pgmap v538: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:39.579 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:39.580 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:39.604 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:39.605 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:40.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:40 vm08 ceph-mon[56824]: pgmap v539: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:40.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:40 vm00 ceph-mon[47668]: pgmap v539: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:42.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:42 vm00 ceph-mon[47668]: pgmap v540: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 
23:21:42 vm08 ceph-mon[56824]: pgmap v540: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:44.606 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:44.607 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:44.634 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:44.635 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:44 vm08 ceph-mon[56824]: pgmap v541: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:44 vm00 ceph-mon[47668]: pgmap v541: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:46 vm08 ceph-mon[56824]: pgmap v542: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:46 vm00 ceph-mon[47668]: pgmap v542: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:48 vm08 ceph-mon[56824]: pgmap v543: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:48 vm00 ceph-mon[47668]: pgmap v543: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:49.636 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:49.637 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:49.662 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:49.663 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:50 vm08 ceph-mon[56824]: pgmap v544: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:50.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:50 vm00 ceph-mon[47668]: pgmap v544: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:52 vm08 ceph-mon[56824]: pgmap v545: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:53.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:52 vm00 ceph-mon[47668]: pgmap v545: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:54.664 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:54.665 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:54.691 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:54.691 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:21:55.377 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:55 vm08 ceph-mon[56824]: pgmap v546: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:54 vm00 ceph-mon[47668]: pgmap v546: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:57.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:56 vm08 ceph-mon[56824]: pgmap v547: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:56 vm00 ceph-mon[47668]: pgmap v547: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:21:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:21:58 vm08 ceph-mon[56824]: pgmap v548: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:21:58 vm00 ceph-mon[47668]: pgmap v548: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:21:59.693 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:21:59.693 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:21:59.723 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:21:59.724 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:00.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:00 vm08 ceph-mon[56824]: pgmap v549: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:00.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:00 vm00 ceph-mon[47668]: pgmap v549: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:03.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:02 vm00 ceph-mon[47668]: pgmap v550: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:02 vm08 ceph-mon[56824]: pgmap v550: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:04.725 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:04.726 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:04.752 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:04.752 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:04 vm08 ceph-mon[56824]: pgmap v551: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:04 vm00 ceph-mon[47668]: pgmap v551: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:22:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:22:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:22:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:22:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:06 vm08 ceph-mon[56824]: pgmap v552: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:06 vm00 ceph-mon[47668]: pgmap v552: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:08 vm08 ceph-mon[56824]: pgmap v553: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:08 vm00 ceph-mon[47668]: pgmap v553: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:09.753 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:09.754 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:09.780 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:09.780 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:10.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:10 vm08 ceph-mon[56824]: pgmap v554: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:10.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:10 vm00 ceph-mon[47668]: pgmap v554: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:12.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:12 vm00 ceph-mon[47668]: pgmap v555: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:12 vm08 ceph-mon[56824]: pgmap v555: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:14.782 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:14.782 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:14.808 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:14.809 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:15.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:14 vm08 ceph-mon[56824]: pgmap v556: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:14 vm00 ceph-mon[47668]: pgmap v556: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:16.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:16 vm08 ceph-mon[56824]: pgmap v557: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:16 vm00 ceph-mon[47668]: pgmap v557: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:18.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:22:18.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:22:18.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:17 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:22:18.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:22:18.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:22:18.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:17 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:22:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:18 vm00 ceph-mon[47668]: pgmap v558: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:22:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:22:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:18 vm08 ceph-mon[56824]: pgmap v558: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:19.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:22:19.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:22:19.810 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:19.811 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:19.836 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:19.837 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:20.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:20 vm08 ceph-mon[56824]: pgmap v559: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:20.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:20 vm00 ceph-mon[47668]: pgmap v559: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:22 vm00 ceph-mon[47668]: pgmap v560: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:22 vm08 ceph-mon[56824]: pgmap v560: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:24.838 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:24.839 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:24.865 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:24.866 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:24 vm08 ceph-mon[56824]: pgmap v561: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:24 vm00 ceph-mon[47668]: pgmap v561: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:26.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:26 vm08 ceph-mon[56824]: pgmap v562: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:27.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:26 vm00 ceph-mon[47668]: pgmap v562: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:28 vm08 ceph-mon[56824]: pgmap v563: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:28 vm00 ceph-mon[47668]: pgmap v563: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:29.868 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:29.868 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:29.894 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:29.894 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:30 vm08 ceph-mon[56824]: pgmap v564: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:30.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:30 vm00 
ceph-mon[47668]: pgmap v564: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:33.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:32 vm00 ceph-mon[47668]: pgmap v565: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:33.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:32 vm08 ceph-mon[56824]: pgmap v565: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:34.896 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:34.896 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:34.923 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:34.923 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:34 vm08 ceph-mon[56824]: pgmap v566: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:34 vm00 ceph-mon[47668]: pgmap v566: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:36 vm08 ceph-mon[56824]: pgmap v567: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:36 vm00 ceph-mon[47668]: pgmap v567: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:38 vm08 ceph-mon[56824]: pgmap v568: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:38 vm00 ceph-mon[47668]: pgmap v568: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:39.924 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:39.925 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:39.950 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:39.951 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:40.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:40 vm08 ceph-mon[56824]: pgmap v569: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:40.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:40 vm00 ceph-mon[47668]: pgmap v569: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:43.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:42 vm00 ceph-mon[47668]: pgmap v570: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:42 vm08 ceph-mon[56824]: pgmap v570: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
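Because the mount retries are interleaved with pgmap heartbeats and mgr audit lines from both monitors, it can help to pull just the retry attempts out of the archived job log when reading a stretch like this. A small illustrative filter follows; the teuthology.log filename is an assumption for illustration, not something this run shows:

    # count how many failed mount attempts the job log records
    grep -c 'mount.nfs: mount system call failed' teuthology.log
    # show the timestamps of the first and last recorded attempts
    grep 'mount -t nfs vm00:/fake /mnt/foo' teuthology.log | sed -n '1p;$p'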
2026-03-08T23:22:44.952 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:44.953 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:44.977 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:44.978 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:44 vm08 ceph-mon[56824]: pgmap v571: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:44 vm00 ceph-mon[47668]: pgmap v571: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:46 vm08 ceph-mon[56824]: pgmap v572: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:46 vm00 ceph-mon[47668]: pgmap v572: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:49.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:48 vm08 ceph-mon[56824]: pgmap v573: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:49.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:48 vm00 ceph-mon[47668]: pgmap v573: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:49.979 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:49.980 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:50.007 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:50.007 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:50.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:50 vm08 ceph-mon[56824]: pgmap v574: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:50.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:50 vm00 ceph-mon[47668]: pgmap v574: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:52 vm00 ceph-mon[47668]: pgmap v575: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:52 vm08 ceph-mon[56824]: pgmap v575: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:55.009 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:22:55.010 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:22:55.037 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:22:55.037 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:22:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:54 vm08 ceph-mon[56824]: pgmap v576: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 
op/s 2026-03-08T23:22:55.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:54 vm00 ceph-mon[47668]: pgmap v576: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:57.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:56 vm08 ceph-mon[56824]: pgmap v577: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:57.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:56 vm00 ceph-mon[47668]: pgmap v577: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:22:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:22:58 vm08 ceph-mon[56824]: pgmap v578: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:22:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:22:58 vm00 ceph-mon[47668]: pgmap v578: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:00.039 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:00.039 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:00.066 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:00.067 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:00.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:00 vm08 ceph-mon[56824]: pgmap v579: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:00.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:00 vm00 ceph-mon[47668]: pgmap v579: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:03.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:02 vm00 ceph-mon[47668]: pgmap v580: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:03.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:02 vm08 ceph-mon[56824]: pgmap v580: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:05.068 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:05.069 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:05.096 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:05.096 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:04 vm08 ceph-mon[56824]: pgmap v581: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:04 vm00 ceph-mon[47668]: pgmap v581: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:23:06.029 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:23:06.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:23:06.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:23:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:06 vm08 ceph-mon[56824]: pgmap v582: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:06 vm00 ceph-mon[47668]: pgmap v582: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:08 vm08 ceph-mon[56824]: pgmap v583: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:08 vm00 ceph-mon[47668]: pgmap v583: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:10.098 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:10.098 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:10.124 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:10.125 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:10.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:10 vm08 ceph-mon[56824]: pgmap v584: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:10.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:10 vm00 ceph-mon[47668]: pgmap v584: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:12 vm00 ceph-mon[47668]: pgmap v585: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:12 vm08 ceph-mon[56824]: pgmap v585: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:15.126 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:15.127 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:14 vm08 ceph-mon[56824]: pgmap v586: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:15.152 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:15.152 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:14 vm00 ceph-mon[47668]: pgmap v586: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:16 vm08 ceph-mon[56824]: pgmap v587: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:16 vm00 ceph-mon[47668]: pgmap v587: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:18 vm08 ceph-mon[56824]: pgmap v588: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:23:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:23:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:23:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:23:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:23:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:18 vm00 ceph-mon[47668]: pgmap v588: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:23:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:23:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:23:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:23:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:23:20.154 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:20.154 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:20.181 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount 
system call failed 2026-03-08T23:23:20.182 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:20.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:20 vm08 ceph-mon[56824]: pgmap v589: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:20.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:20 vm00 ceph-mon[47668]: pgmap v589: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:23.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:22 vm00 ceph-mon[47668]: pgmap v590: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:22 vm08 ceph-mon[56824]: pgmap v590: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:24 vm08 ceph-mon[56824]: pgmap v591: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:24 vm00 ceph-mon[47668]: pgmap v591: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:25.183 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:25.183 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:25.209 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:25.210 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:26.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:26 vm08 ceph-mon[56824]: pgmap v592: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:27.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:26 vm00 ceph-mon[47668]: pgmap v592: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:28 vm08 ceph-mon[56824]: pgmap v593: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:28 vm00 ceph-mon[47668]: pgmap v593: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:30.211 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:30.212 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:30.237 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:30.237 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:30 vm08 ceph-mon[56824]: pgmap v594: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:30.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:30 vm00 ceph-mon[47668]: pgmap v594: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:33.087 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:32 vm00 ceph-mon[47668]: pgmap v595: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:32 vm08 ceph-mon[56824]: pgmap v595: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:34 vm08 ceph-mon[56824]: pgmap v596: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:34 vm00 ceph-mon[47668]: pgmap v596: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:35.239 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:35.239 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:35.266 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:35.266 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:37.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:36 vm08 ceph-mon[56824]: pgmap v597: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:36 vm00 ceph-mon[47668]: pgmap v597: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:38 vm08 ceph-mon[56824]: pgmap v598: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:39.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:38 vm00 ceph-mon[47668]: pgmap v598: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:40.268 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:40.269 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:40.357 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:40.358 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:40.981 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:40 vm00 ceph-mon[47668]: pgmap v599: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:40 vm08 ceph-mon[56824]: pgmap v599: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:43.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:42 vm00 ceph-mon[47668]: pgmap v600: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:42 vm08 ceph-mon[56824]: pgmap v600: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:45.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:44 vm00 ceph-mon[47668]: pgmap v601: 97 pgs: 97 active+clean; 453 KiB data, 53 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:23:45.360 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:45.360 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:44 vm08 ceph-mon[56824]: pgmap v601: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:23:45.410 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:45.411 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:46 vm00 ceph-mon[47668]: pgmap v602: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:46 vm08 ceph-mon[56824]: pgmap v602: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:48 vm00 ceph-mon[47668]: pgmap v603: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:23:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:48 vm08 ceph-mon[56824]: pgmap v603: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:23:50.412 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:50.413 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:50.441 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:23:50.442 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:50 vm08 ceph-mon[56824]: pgmap v604: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:23:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:50 vm00 ceph-mon[47668]: pgmap v604: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:23:53.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:52 vm00 ceph-mon[47668]: pgmap v605: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:52 vm08 ceph-mon[56824]: pgmap v605: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:54 vm08 ceph-mon[56824]: pgmap v606: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:54 vm00 ceph-mon[47668]: pgmap v606: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:55.443 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:23:55.444 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:23:55.469 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-08T23:23:55.470 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:23:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:56 vm08 ceph-mon[56824]: pgmap v607: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:56 vm00 ceph-mon[47668]: pgmap v607: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:23:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:23:58 vm08 ceph-mon[56824]: pgmap v608: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:23:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:23:58 vm00 ceph-mon[47668]: pgmap v608: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:00.471 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:00.472 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:00.497 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:00.498 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:00.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:00 vm08 ceph-mon[56824]: pgmap v609: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:00.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:00 vm00 ceph-mon[47668]: pgmap v609: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:03.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:02 vm00 ceph-mon[47668]: pgmap v610: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:02 vm08 ceph-mon[56824]: pgmap v610: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:04 vm08 ceph-mon[56824]: pgmap v611: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:04 vm00 ceph-mon[47668]: pgmap v611: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:05.499 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:05.500 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:05.530 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:05.531 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:24:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:24:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:24:06.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:24:06.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:06 vm08 ceph-mon[56824]: pgmap v612: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:07.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:06 vm00 ceph-mon[47668]: pgmap v612: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:08 vm08 ceph-mon[56824]: pgmap v613: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:08 vm00 ceph-mon[47668]: pgmap v613: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:10.532 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:10.533 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:10.559 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:10.559 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:10.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:10 vm08 ceph-mon[56824]: pgmap v614: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:10.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:10 vm00 ceph-mon[47668]: pgmap v614: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:12 vm00 ceph-mon[47668]: pgmap v615: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:12 vm08 ceph-mon[56824]: pgmap v615: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:14 vm08 ceph-mon[56824]: pgmap v616: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:14 vm00 ceph-mon[47668]: pgmap v616: 97 pgs: 97 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:15.560 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:15.561 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:15.586 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-08T23:24:15.586 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:16 vm08 ceph-mon[56824]: pgmap v617: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:16 vm00 ceph-mon[47668]: pgmap v617: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:18.718 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:18 vm00 ceph-mon[47668]: pgmap v618: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:18.718 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:24:18.718 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:24:18.718 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:24:18.718 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:24:18.718 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:24:18.718 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:24:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:18 vm08 ceph-mon[56824]: pgmap v618: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:24:19.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:24:19.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:24:19.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:24:19.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:24:19.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:24:20.588 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:20.588 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:20.676 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:20.676 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:20 vm08 ceph-mon[56824]: pgmap v619: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:20 vm00 ceph-mon[47668]: pgmap v619: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:22 vm00 ceph-mon[47668]: pgmap v620: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:23.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:22 vm08 ceph-mon[56824]: pgmap v620: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:25.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:24 vm08 ceph-mon[56824]: pgmap v621: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:24 vm00 ceph-mon[47668]: pgmap v621: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:25.678 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:25.678 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:25.705 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:25.705 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:26.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:26 vm08 ceph-mon[56824]: pgmap v622: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:26 vm00 ceph-mon[47668]: pgmap v622: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:28 vm08 ceph-mon[56824]: pgmap v623: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:28 vm00 ceph-mon[47668]: pgmap v623: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:30.707 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:30.707 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:30.733 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:30.733 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:30.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:30 vm08 ceph-mon[56824]: pgmap v624: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:30.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:30 vm00 
ceph-mon[47668]: pgmap v624: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:33.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:32 vm00 ceph-mon[47668]: pgmap v625: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:32 vm08 ceph-mon[56824]: pgmap v625: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:34 vm08 ceph-mon[56824]: pgmap v626: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:35.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:34 vm00 ceph-mon[47668]: pgmap v626: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:35.735 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:35.735 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:35.760 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:35.761 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:36 vm08 ceph-mon[56824]: pgmap v627: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:36 vm00 ceph-mon[47668]: pgmap v627: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:38 vm08 ceph-mon[56824]: pgmap v628: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:38 vm00 ceph-mon[47668]: pgmap v628: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:40.762 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:40.763 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:40.787 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:40.788 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:40 vm08 ceph-mon[56824]: pgmap v629: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:40.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:40 vm00 ceph-mon[47668]: pgmap v629: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:43.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:42 vm00 ceph-mon[47668]: pgmap v630: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:42 vm08 ceph-mon[56824]: pgmap v630: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-08T23:24:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:44 vm08 ceph-mon[56824]: pgmap v631: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:44 vm00 ceph-mon[47668]: pgmap v631: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:45.789 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:45.790 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:45.818 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:45.818 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:46 vm08 ceph-mon[56824]: pgmap v632: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:46 vm00 ceph-mon[47668]: pgmap v632: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:48 vm08 ceph-mon[56824]: pgmap v633: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:48 vm00 ceph-mon[47668]: pgmap v633: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:50.820 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:50.820 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:50 vm08 ceph-mon[56824]: pgmap v634: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:50.892 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:50.893 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:50 vm00 ceph-mon[47668]: pgmap v634: 97 pgs: 97 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:52 vm00 ceph-mon[47668]: pgmap v635: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:52 vm08 ceph-mon[56824]: pgmap v635: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:55.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:54 vm08 ceph-mon[56824]: pgmap v636: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:54 vm00 ceph-mon[47668]: pgmap v636: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:55.894 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:24:55.894 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:24:55.920 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:24:55.920 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:24:56.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:56 vm08 ceph-mon[56824]: pgmap v637: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:56 vm00 ceph-mon[47668]: pgmap v637: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:24:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:24:58 vm08 ceph-mon[56824]: pgmap v638: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:24:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:24:58 vm00 ceph-mon[47668]: pgmap v638: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:00.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:00 vm08 ceph-mon[56824]: pgmap v639: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:00.921 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:00.922 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:00.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:00 vm00 ceph-mon[47668]: pgmap v639: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:00.947 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:00.948 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:03.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:02 vm00 ceph-mon[47668]: pgmap v640: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:02 vm08 ceph-mon[56824]: pgmap v640: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:04 vm08 ceph-mon[56824]: pgmap v641: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:04 vm00 ceph-mon[47668]: pgmap v641: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:05.949 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:05.949 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:05.975 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:05.975 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:25:06.029 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:25:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:25:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:25:07.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:06 vm08 ceph-mon[56824]: pgmap v642: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:07.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:06 vm00 ceph-mon[47668]: pgmap v642: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:09.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:08 vm08 ceph-mon[56824]: pgmap v643: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:09.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:08 vm00 ceph-mon[47668]: pgmap v643: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:10.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:10 vm08 ceph-mon[56824]: pgmap v644: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:10.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:10 vm00 ceph-mon[47668]: pgmap v644: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:10.977 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:10.977 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:11.003 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:11.004 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:12 vm00 ceph-mon[47668]: pgmap v645: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:12 vm08 ceph-mon[56824]: pgmap v645: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:14 vm08 ceph-mon[56824]: pgmap v646: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:14 vm00 ceph-mon[47668]: pgmap v646: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:16.006 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:16.006 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:16.033 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:16.033 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:16.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:16 vm08 ceph-mon[56824]: pgmap v647: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:16 vm00 ceph-mon[47668]: pgmap v647: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:19.064 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:18 vm08 ceph-mon[56824]: pgmap v648: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:19.064 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:25:19.064 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:25:19.064 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:18 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:25:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:18 vm00 ceph-mon[47668]: pgmap v648: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:25:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:25:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:18 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:25:20.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:25:20.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:25:20.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:25:20.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:25:20.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:25:20.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:19 vm00 ceph-mon[47668]: 
from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:25:21.034 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:21.035 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:21.067 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:21.068 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:21.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:20 vm08 ceph-mon[56824]: pgmap v649: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:20 vm00 ceph-mon[47668]: pgmap v649: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:23.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:23 vm08 ceph-mon[56824]: pgmap v650: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:23 vm00 ceph-mon[47668]: pgmap v650: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:25 vm08 ceph-mon[56824]: pgmap v651: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:25 vm00 ceph-mon[47668]: pgmap v651: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:26.069 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:26.069 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:26.096 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:26.096 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:27 vm08 ceph-mon[56824]: pgmap v652: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:27 vm00 ceph-mon[47668]: pgmap v652: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:29 vm08 ceph-mon[56824]: pgmap v653: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:29.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:29 vm00 ceph-mon[47668]: pgmap v653: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:30.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:30 vm08 ceph-mon[56824]: pgmap v654: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:30.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:30 vm00 ceph-mon[47668]: pgmap v654: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:31.098 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:31.098 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:31.125 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:31.126 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:33.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:32 vm00 ceph-mon[47668]: pgmap v655: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:32 vm08 ceph-mon[56824]: pgmap v655: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:34 vm08 ceph-mon[56824]: pgmap v656: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:35.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:34 vm00 ceph-mon[47668]: pgmap v656: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:36.127 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:36.128 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:36.154 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:36.155 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:37.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:36 vm08 ceph-mon[56824]: pgmap v657: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:36 vm00 ceph-mon[47668]: pgmap v657: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:38 vm08 ceph-mon[56824]: pgmap v658: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:38 vm00 ceph-mon[47668]: pgmap v658: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:40 vm08 ceph-mon[56824]: pgmap v659: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:40.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:40 vm00 ceph-mon[47668]: pgmap v659: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:41.156 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:41.157 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:41.182 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:41.182 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:43.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:42 vm00 ceph-mon[47668]: pgmap v660: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-08T23:25:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:42 vm08 ceph-mon[56824]: pgmap v660: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:44 vm08 ceph-mon[56824]: pgmap v661: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:44 vm00 ceph-mon[47668]: pgmap v661: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:46.183 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:46.184 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:46.210 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:46.210 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:47.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:46 vm08 ceph-mon[56824]: pgmap v662: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:46 vm00 ceph-mon[47668]: pgmap v662: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:48 vm08 ceph-mon[56824]: pgmap v663: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:48 vm00 ceph-mon[47668]: pgmap v663: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:50 vm08 ceph-mon[56824]: pgmap v664: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:50 vm00 ceph-mon[47668]: pgmap v664: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:51.211 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:51.212 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:51.238 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:51.238 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:52 vm00 ceph-mon[47668]: pgmap v665: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:52 vm08 ceph-mon[56824]: pgmap v665: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:54 vm08 ceph-mon[56824]: pgmap v666: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:54 vm00 ceph-mon[47668]: pgmap v666: 97 pgs: 97 
active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:56.239 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:25:56.240 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:25:56.270 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:25:56.270 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:25:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:56 vm08 ceph-mon[56824]: pgmap v667: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:56 vm00 ceph-mon[47668]: pgmap v667: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:25:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:25:58 vm08 ceph-mon[56824]: pgmap v668: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:25:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:25:58 vm00 ceph-mon[47668]: pgmap v668: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:00.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:00 vm08 ceph-mon[56824]: pgmap v669: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:00.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:00 vm00 ceph-mon[47668]: pgmap v669: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:01.271 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:01.272 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:01.297 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:01.297 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:03.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:02 vm00 ceph-mon[47668]: pgmap v670: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:03.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:02 vm08 ceph-mon[56824]: pgmap v670: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:05.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:04 vm08 ceph-mon[56824]: pgmap v671: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:04 vm00 ceph-mon[47668]: pgmap v671: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:26:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:26:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:26:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:26:06.299 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:06.299 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:06.326 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:06.327 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:06 vm08 ceph-mon[56824]: pgmap v672: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:06 vm00 ceph-mon[47668]: pgmap v672: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:08 vm08 ceph-mon[56824]: pgmap v673: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:08 vm00 ceph-mon[47668]: pgmap v673: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:10.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:10 vm08 ceph-mon[56824]: pgmap v674: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:10.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:10 vm00 ceph-mon[47668]: pgmap v674: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:11.328 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:11.328 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:11.356 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:11.356 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:12 vm00 ceph-mon[47668]: pgmap v675: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:13.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:12 vm08 ceph-mon[56824]: pgmap v675: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:14 vm08 ceph-mon[56824]: pgmap v676: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:14 vm00 ceph-mon[47668]: pgmap v676: 97 pgs: 97 active+clean; 453 KiB 
data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:16.358 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:16.358 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:16.384 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:16.384 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:17.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:16 vm08 ceph-mon[56824]: pgmap v677: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:16 vm00 ceph-mon[47668]: pgmap v677: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:19.104 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:18 vm00 ceph-mon[47668]: pgmap v678: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:19.105 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:18 vm08 ceph-mon[56824]: pgmap v678: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:20.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:26:20.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:26:20.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:26:20.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:26:20.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:26:20.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:26:20.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:26:20.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:26:20.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:26:20.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": 
"config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:26:20.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:26:20.189 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:26:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:20 vm08 ceph-mon[56824]: pgmap v679: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:21.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:20 vm00 ceph-mon[47668]: pgmap v679: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:21.386 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:21.386 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:21.412 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:21.413 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:22 vm00 ceph-mon[47668]: pgmap v680: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:22 vm08 ceph-mon[56824]: pgmap v680: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:25.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:24 vm08 ceph-mon[56824]: pgmap v681: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:25.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:24 vm00 ceph-mon[47668]: pgmap v681: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:26.415 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:26.415 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:26.443 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:26.444 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:27.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:26 vm08 ceph-mon[56824]: pgmap v682: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:26 vm00 ceph-mon[47668]: pgmap v682: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:28 vm08 ceph-mon[56824]: pgmap v683: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:28 vm00 ceph-mon[47668]: pgmap v683: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:30.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:30 vm00 ceph-mon[47668]: pgmap v684: 97 pgs: 97 active+clean; 453 KiB data, 58 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:30 vm08 ceph-mon[56824]: pgmap v684: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:31.445 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:31.446 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:31.472 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:31.472 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:33.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:32 vm00 ceph-mon[47668]: pgmap v685: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:32 vm08 ceph-mon[56824]: pgmap v685: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:34 vm08 ceph-mon[56824]: pgmap v686: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:35.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:34 vm00 ceph-mon[47668]: pgmap v686: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:36.474 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:36.474 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:36.501 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:36.502 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:37.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:36 vm08 ceph-mon[56824]: pgmap v687: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:36 vm00 ceph-mon[47668]: pgmap v687: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:38 vm08 ceph-mon[56824]: pgmap v688: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:38 vm00 ceph-mon[47668]: pgmap v688: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:40.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:40 vm00 ceph-mon[47668]: pgmap v689: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:40 vm08 ceph-mon[56824]: pgmap v689: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:41.503 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:41.503 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:41.530 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-08T23:26:41.531 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:43.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:42 vm00 ceph-mon[47668]: pgmap v690: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:42 vm08 ceph-mon[56824]: pgmap v690: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:44 vm08 ceph-mon[56824]: pgmap v691: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:44 vm00 ceph-mon[47668]: pgmap v691: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:46.532 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:46.533 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:46.558 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:46.559 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:47.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:46 vm08 ceph-mon[56824]: pgmap v692: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:47.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:46 vm00 ceph-mon[47668]: pgmap v692: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:48 vm08 ceph-mon[56824]: pgmap v693: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:49.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:48 vm00 ceph-mon[47668]: pgmap v693: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:50 vm00 ceph-mon[47668]: pgmap v694: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:50 vm08 ceph-mon[56824]: pgmap v694: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:51.560 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:51.561 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:51.587 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:51.587 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:52 vm00 ceph-mon[47668]: pgmap v695: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:52 vm08 ceph-mon[56824]: pgmap v695: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:55.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:54 vm08 ceph-mon[56824]: pgmap v696: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:54 vm00 ceph-mon[47668]: pgmap v696: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:56.589 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:26:56.590 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:26:56.620 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:26:56.620 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:26:57.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:56 vm08 ceph-mon[56824]: pgmap v697: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:57.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:56 vm00 ceph-mon[47668]: pgmap v697: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:26:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:26:58 vm08 ceph-mon[56824]: pgmap v698: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:26:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:26:58 vm00 ceph-mon[47668]: pgmap v698: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:00.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:00 vm00 ceph-mon[47668]: pgmap v699: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:01.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:00 vm08 ceph-mon[56824]: pgmap v699: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:01.622 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:01.622 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:01.649 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:01.649 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:03.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:02 vm00 ceph-mon[47668]: pgmap v700: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:03.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:02 vm08 ceph-mon[56824]: pgmap v700: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:04 vm08 ceph-mon[56824]: pgmap v701: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:04 vm00 ceph-mon[47668]: pgmap v701: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:27:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:27:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:27:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:05 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:27:06.651 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:06.651 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:06.677 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:06.677 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:07.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:06 vm08 ceph-mon[56824]: pgmap v702: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:06 vm00 ceph-mon[47668]: pgmap v702: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:08 vm08 ceph-mon[56824]: pgmap v703: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:09.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:08 vm00 ceph-mon[47668]: pgmap v703: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:10.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:10 vm00 ceph-mon[47668]: pgmap v704: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:11.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:10 vm08 ceph-mon[56824]: pgmap v704: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:11.679 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:11.679 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:11.706 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:11.706 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:13.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:12 vm00 ceph-mon[47668]: pgmap v705: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:12 vm08 ceph-mon[56824]: pgmap v705: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:15.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:14 vm08 ceph-mon[56824]: pgmap v706: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:14 vm00 ceph-mon[47668]: pgmap v706: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:16.708 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:16.709 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:16.848 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:16.849 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:17.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:16 vm08 ceph-mon[56824]: pgmap v707: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:17.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:16 vm00 ceph-mon[47668]: pgmap v707: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:18 vm08 ceph-mon[56824]: pgmap v708: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:18 vm00 ceph-mon[47668]: pgmap v708: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:19.932 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:27:19.932 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:27:19.932 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:19 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:27:19.962 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:27:19.962 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:27:19.962 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:19 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:27:21.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:20 vm00 ceph-mon[47668]: pgmap v709: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:21.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:27:21.087 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:27:21.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:20 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:27:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:20 vm08 ceph-mon[56824]: pgmap v709: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:27:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:27:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:20 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:27:21.851 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:21.852 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:21.879 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:21.880 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:22.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:27:22.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:27:22.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:27:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:22 vm00 ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:27:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:23 vm08 ceph-mon[56824]: pgmap v710: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:23 vm00 ceph-mon[47668]: pgmap v710: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:25 vm08 ceph-mon[56824]: pgmap v711: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:25.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:25 vm00 ceph-mon[47668]: pgmap v711: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:26.881 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:26.882 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:26.906 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:26.907 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:27 vm00 ceph-mon[47668]: pgmap v712: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:27 vm08 ceph-mon[56824]: pgmap v712: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:29 vm00 ceph-mon[47668]: pgmap v713: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:29 vm08 ceph-mon[56824]: pgmap v713: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:30.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:30 vm00 ceph-mon[47668]: pgmap v714: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:30 vm08 ceph-mon[56824]: pgmap v714: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:31.908 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:31.909 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:31.933 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:31.934 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:33.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:32 vm00 ceph-mon[47668]: pgmap v715: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:32 vm08 ceph-mon[56824]: pgmap v715: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:34 vm08 ceph-mon[56824]: pgmap v716: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:35.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:34 vm00 ceph-mon[47668]: pgmap v716: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:36.935 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:36.936 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:36.963 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:36.963 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:37.039 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:36 vm08 ceph-mon[56824]: pgmap v717: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:36 vm00 ceph-mon[47668]: pgmap v717: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:39.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:38 
vm08 ceph-mon[56824]: pgmap v718: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:38 vm00 ceph-mon[47668]: pgmap v718: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:40.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:40 vm00 ceph-mon[47668]: pgmap v719: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:41.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:40 vm08 ceph-mon[56824]: pgmap v719: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:41.965 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:41.966 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:41.992 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:41.992 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:43.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:42 vm00 ceph-mon[47668]: pgmap v720: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:43.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:42 vm08 ceph-mon[56824]: pgmap v720: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:45.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:44 vm08 ceph-mon[56824]: pgmap v721: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:45.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:44 vm00 ceph-mon[47668]: pgmap v721: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:46.993 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:46.994 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:47.022 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:47.022 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:47.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:46 vm08 ceph-mon[56824]: pgmap v722: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:46 vm00 ceph-mon[47668]: pgmap v722: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:49.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:48 vm08 ceph-mon[56824]: pgmap v723: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:48 vm00 ceph-mon[47668]: pgmap v723: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:50 vm08 ceph-mon[56824]: pgmap v724: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 
op/s 2026-03-08T23:27:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:50 vm00 ceph-mon[47668]: pgmap v724: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:52.023 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:52.024 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00:/fake /mnt/foo -o sync 2026-03-08T23:27:52.049 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:52.049 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:53.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:52 vm00 ceph-mon[47668]: pgmap v725: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:52 vm08 ceph-mon[56824]: pgmap v725: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:54 vm08 ceph-mon[56824]: pgmap v726: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:54 vm00 ceph-mon[47668]: pgmap v726: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:57.051 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:27:57.052 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:27:57.081 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:27:57.082 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:27:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:56 vm08 ceph-mon[56824]: pgmap v727: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:56 vm00 ceph-mon[47668]: pgmap v727: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:27:59.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:27:58 vm08 ceph-mon[56824]: pgmap v728: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:27:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:27:58 vm00.local ceph-mon[47668]: pgmap v728: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:01.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:00 vm08 ceph-mon[56824]: pgmap v729: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:00 vm00.local ceph-mon[47668]: pgmap v729: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:02.083 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:02.084 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:02.110 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:02.110 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:02 vm08 ceph-mon[56824]: pgmap v730: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:03.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:02 vm00.local ceph-mon[47668]: pgmap v730: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:05.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:04 vm00.local ceph-mon[47668]: pgmap v731: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:04 vm08 ceph-mon[56824]: pgmap v731: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:28:06.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:28:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:28:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:05 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:28:07.112 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:07.113 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:07.140 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:07.141 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:06 vm00.local ceph-mon[47668]: pgmap v732: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:06 vm08 ceph-mon[56824]: pgmap v732: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:08 vm00.local ceph-mon[47668]: pgmap v733: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:08 vm08 ceph-mon[56824]: pgmap v733: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:10 vm00.local ceph-mon[47668]: pgmap v734: 97 pgs: 97 active+clean; 453 KiB data, 58 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:10 vm08 ceph-mon[56824]: pgmap v734: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:12.142 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:12.143 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:12.168 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:12.169 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:13.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:12 vm00.local ceph-mon[47668]: pgmap v735: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:12 vm08 ceph-mon[56824]: pgmap v735: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:14 vm00.local ceph-mon[47668]: pgmap v736: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:14 vm08 ceph-mon[56824]: pgmap v736: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:17.171 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:17.171 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:17.197 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:17.197 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:16 vm08 ceph-mon[56824]: pgmap v737: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:16 vm00.local ceph-mon[47668]: pgmap v737: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:18 vm08 ceph-mon[56824]: pgmap v738: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:18 vm00.local ceph-mon[47668]: pgmap v738: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:21.350 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:21 vm08 ceph-mon[56824]: pgmap v739: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:21.360 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:21 vm00.local ceph-mon[47668]: pgmap v739: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:22.199 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:22.199 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:22.227 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:22.227 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:22.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:28:22.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:28:22.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:28:22.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:28:22.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:22 vm08 ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:28:22.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:28:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:28:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:28:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:28:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:28:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:23 vm08 ceph-mon[56824]: pgmap v740: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:23 vm00.local ceph-mon[47668]: pgmap v740: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:25 vm08 ceph-mon[56824]: pgmap v741: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:25.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:25 vm00.local ceph-mon[47668]: pgmap v741: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:27.230 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:27.230 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:27.259 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:27.259 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:27 vm08 ceph-mon[56824]: pgmap v742: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:27 vm00.local ceph-mon[47668]: pgmap v742: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:29 vm08.local ceph-mon[56824]: pgmap v743: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:29 vm00.local ceph-mon[47668]: pgmap v743: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:31 vm08.local ceph-mon[56824]: pgmap v744: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:31 vm00.local ceph-mon[47668]: pgmap v744: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:32.260 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:32.261 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:32.287 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:32.287 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:33 vm08.local ceph-mon[56824]: pgmap v745: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:33 vm00.local ceph-mon[47668]: pgmap v745: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:35 vm08.local ceph-mon[56824]: pgmap v746: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:35 vm00.local ceph-mon[47668]: pgmap v746: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:37.130 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:37 vm08.local ceph-mon[56824]: pgmap v747: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:37.289 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:37.289 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:37.318 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:37.318 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:37 vm00.local ceph-mon[47668]: pgmap v747: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 
2026-03-08T23:28:39.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:39 vm08.local ceph-mon[56824]: pgmap v748: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:39.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:39 vm00.local ceph-mon[47668]: pgmap v748: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:41.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:41 vm08.local ceph-mon[56824]: pgmap v749: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:41 vm00.local ceph-mon[47668]: pgmap v749: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:42.320 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:42.320 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:42.346 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:42.347 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:43 vm08.local ceph-mon[56824]: pgmap v750: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:43 vm00.local ceph-mon[47668]: pgmap v750: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:45 vm08.local ceph-mon[56824]: pgmap v751: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:45 vm00.local ceph-mon[47668]: pgmap v751: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:47.348 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:47.349 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:47.374 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:47.374 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:47 vm08.local ceph-mon[56824]: pgmap v752: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:47 vm00.local ceph-mon[47668]: pgmap v752: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:49 vm08.local ceph-mon[56824]: pgmap v753: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:49 vm00.local ceph-mon[47668]: pgmap v753: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:51.377 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:51 vm08.local ceph-mon[56824]: pgmap v754: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:51.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:51 vm00.local ceph-mon[47668]: pgmap v754: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:52.376 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:52.376 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:52.401 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:52.402 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:53 vm08.local ceph-mon[56824]: pgmap v755: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:53.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:53 vm00.local ceph-mon[47668]: pgmap v755: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:55 vm08.local ceph-mon[56824]: pgmap v756: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:55.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:55 vm00.local ceph-mon[47668]: pgmap v756: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:57.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:57 vm08.local ceph-mon[56824]: pgmap v757: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:57.403 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:28:57.404 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:28:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:57 vm00.local ceph-mon[47668]: pgmap v757: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:28:57.430 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:28:57.430 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:28:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:28:59 vm08.local ceph-mon[56824]: pgmap v758: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:28:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:28:59 vm00.local ceph-mon[47668]: pgmap v758: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:01 vm08.local ceph-mon[56824]: pgmap v759: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:01 vm00.local ceph-mon[47668]: pgmap v759: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:02.432 INFO:teuthology.orchestra.run.vm00.stderr:++ 
hostname 2026-03-08T23:29:02.432 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:02.458 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:02.459 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:03 vm08.local ceph-mon[56824]: pgmap v760: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:03 vm00.local ceph-mon[47668]: pgmap v760: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:05 vm08.local ceph-mon[56824]: pgmap v761: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:05 vm00.local ceph-mon[47668]: pgmap v761: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:29:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:29:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:29:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:29:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:07 vm00.local ceph-mon[47668]: pgmap v762: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:07.460 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:07.461 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:07.487 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:07.488 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:07 vm08.local ceph-mon[56824]: pgmap v762: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:09.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:09 vm00.local ceph-mon[47668]: pgmap v763: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:09 vm08.local 
ceph-mon[56824]: pgmap v763: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:11.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:11 vm00.local ceph-mon[47668]: pgmap v764: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:11 vm08.local ceph-mon[56824]: pgmap v764: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:12.489 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:12.490 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:12.517 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:12.517 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:13 vm00.local ceph-mon[47668]: pgmap v765: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:13 vm08.local ceph-mon[56824]: pgmap v765: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:15 vm00.local ceph-mon[47668]: pgmap v766: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:15 vm08.local ceph-mon[56824]: pgmap v766: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:17 vm00.local ceph-mon[47668]: pgmap v767: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:17.519 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:17.519 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:17.543 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:17.544 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:17 vm08.local ceph-mon[56824]: pgmap v767: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:19 vm00.local ceph-mon[47668]: pgmap v768: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:19 vm08.local ceph-mon[56824]: pgmap v768: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:21.425 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:21 vm00.local ceph-mon[47668]: pgmap v769: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:21.426 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:21 vm08.local ceph-mon[56824]: pgmap v769: 97 pgs: 97 
active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:22.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:29:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:29:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:29:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:29:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:29:22.545 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:22.546 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:22.572 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:22.573 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:22.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:29:22.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:29:22.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:29:22.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:29:22.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:29:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:23 vm00.local ceph-mon[47668]: pgmap v770: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:23 vm08.local ceph-mon[56824]: pgmap v770: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:24.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:24 vm08.local ceph-mon[56824]: pgmap v771: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:24.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:24 vm00.local ceph-mon[47668]: pgmap v771: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 
op/s 2026-03-08T23:29:27.574 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:27.574 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:27 vm08.local ceph-mon[56824]: pgmap v772: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:27 vm00.local ceph-mon[47668]: pgmap v772: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:27.782 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:27.800 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:28.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:28 vm08.local ceph-mon[56824]: pgmap v773: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:28.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:28 vm00.local ceph-mon[47668]: pgmap v773: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:30 vm08.local ceph-mon[56824]: pgmap v774: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:30 vm00.local ceph-mon[47668]: pgmap v774: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:32.783 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:32.784 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:32.813 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:32.814 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:33.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:32 vm00.local ceph-mon[47668]: pgmap v775: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:32 vm08.local ceph-mon[56824]: pgmap v775: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:35.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:34 vm08.local ceph-mon[56824]: pgmap v776: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:34 vm00.local ceph-mon[47668]: pgmap v776: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:37.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:36 vm08.local ceph-mon[56824]: pgmap v777: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:36 vm00.local ceph-mon[47668]: pgmap v777: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:37.815 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:37.816 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:37.841 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:37.842 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:38 vm08.local ceph-mon[56824]: pgmap v778: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:38 vm00.local ceph-mon[47668]: pgmap v778: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:40 vm08.local ceph-mon[56824]: pgmap v779: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:41.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:40 vm00.local ceph-mon[47668]: pgmap v779: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:42.843 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:42.843 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:42.869 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:42.869 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:43.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:42 vm00.local ceph-mon[47668]: pgmap v780: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:42 vm08.local ceph-mon[56824]: pgmap v780: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:45.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:44 vm08.local ceph-mon[56824]: pgmap v781: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:45.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:44 vm00.local ceph-mon[47668]: pgmap v781: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:47.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:46 vm08.local ceph-mon[56824]: pgmap v782: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:46 vm00.local ceph-mon[47668]: pgmap v782: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:47.871 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:47.872 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:47.898 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:47.899 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:48 vm08.local ceph-mon[56824]: pgmap v783: 97 pgs: 97 active+clean; 453 KiB data, 58 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:48 vm00.local ceph-mon[47668]: pgmap v783: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:51.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:50 vm08.local ceph-mon[56824]: pgmap v784: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:51.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:50 vm00.local ceph-mon[47668]: pgmap v784: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:52.900 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:52.901 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:52.926 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:52.927 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:52 vm08.local ceph-mon[56824]: pgmap v785: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:53.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:52 vm00.local ceph-mon[47668]: pgmap v785: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:54 vm08.local ceph-mon[56824]: pgmap v786: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:55.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:54 vm00.local ceph-mon[47668]: pgmap v786: 97 pgs: 97 active+clean; 453 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:56 vm08.local ceph-mon[56824]: pgmap v787: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:56 vm00.local ceph-mon[47668]: pgmap v787: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:29:57.928 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:29:57.929 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:29:57.954 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:29:57.954 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:29:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:29:58 vm00.local ceph-mon[47668]: pgmap v788: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:29:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:29:58 vm08.local ceph-mon[56824]: pgmap v788: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:01.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:01 vm08.local ceph-mon[56824]: pgmap v789: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 
B/s wr, 0 op/s 2026-03-08T23:30:01.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:01 vm08.local ceph-mon[56824]: overall HEALTH_OK 2026-03-08T23:30:01.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:01 vm00.local ceph-mon[47668]: pgmap v789: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:01.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:01 vm00.local ceph-mon[47668]: overall HEALTH_OK 2026-03-08T23:30:02.956 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:02.956 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:02.982 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:02.983 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:03 vm08.local ceph-mon[56824]: pgmap v790: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:03 vm00.local ceph-mon[47668]: pgmap v790: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:05 vm08.local ceph-mon[56824]: pgmap v791: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:05 vm00.local ceph-mon[47668]: pgmap v791: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:30:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:30:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:30:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:30:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:07 vm08.local ceph-mon[56824]: pgmap v792: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:07 vm00.local ceph-mon[47668]: pgmap v792: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:07.984 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:07.985 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:08.011 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:08.012 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:09 vm08.local ceph-mon[56824]: pgmap v793: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:09 vm00.local ceph-mon[47668]: pgmap v793: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:11 vm08.local ceph-mon[56824]: pgmap v794: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:11 vm00.local ceph-mon[47668]: pgmap v794: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:13.013 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:13.014 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:13.040 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:13.041 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:13 vm08.local ceph-mon[56824]: pgmap v795: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:13 vm00.local ceph-mon[47668]: pgmap v795: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:15 vm08.local ceph-mon[56824]: pgmap v796: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:15 vm00.local ceph-mon[47668]: pgmap v796: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:17 vm08.local ceph-mon[56824]: pgmap v797: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:17 vm00.local ceph-mon[47668]: pgmap v797: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:18.042 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:18.043 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:18.160 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:18.160 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:19 vm00.local ceph-mon[47668]: pgmap v798: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-08T23:30:19.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:19 vm08.local ceph-mon[56824]: pgmap v798: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:21.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:21 vm00.local ceph-mon[47668]: pgmap v799: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:21 vm08.local ceph-mon[56824]: pgmap v799: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:30:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:30:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:30:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:30:22.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:30:22.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:30:22.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:30:22.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:30:22.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:30:22.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:30:23.161 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:23.162 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:23.187 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:23.187 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:23 vm00.local ceph-mon[47668]: pgmap v800: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:23 vm08.local 
ceph-mon[56824]: pgmap v800: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:25 vm08.local ceph-mon[56824]: pgmap v801: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:25 vm00.local ceph-mon[47668]: pgmap v801: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:27 vm08.local ceph-mon[56824]: pgmap v802: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:27 vm00.local ceph-mon[47668]: pgmap v802: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:28.188 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:28.189 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:28.214 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:28.215 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:29 vm08.local ceph-mon[56824]: pgmap v803: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:29 vm00.local ceph-mon[47668]: pgmap v803: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:30 vm08.local ceph-mon[56824]: pgmap v804: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:30 vm00.local ceph-mon[47668]: pgmap v804: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:32 vm08.local ceph-mon[56824]: pgmap v805: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:33.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:32 vm00.local ceph-mon[47668]: pgmap v805: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:33.216 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:33.217 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:33.241 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:33.242 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:34 vm08.local ceph-mon[56824]: pgmap v806: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:34 vm00.local ceph-mon[47668]: pgmap v806: 97 pgs: 97 
active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:37.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:36 vm08.local ceph-mon[56824]: pgmap v807: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:36 vm00.local ceph-mon[47668]: pgmap v807: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:38.243 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:38.244 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:38.269 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:38.270 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:38 vm08.local ceph-mon[56824]: pgmap v808: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:38 vm00.local ceph-mon[47668]: pgmap v808: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:40 vm08.local ceph-mon[56824]: pgmap v809: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:40 vm00.local ceph-mon[47668]: pgmap v809: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:42 vm08.local ceph-mon[56824]: pgmap v810: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:43.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:42 vm00.local ceph-mon[47668]: pgmap v810: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:43.271 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:43.271 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:43.297 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:43.297 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:44 vm00.local ceph-mon[47668]: pgmap v811: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:44 vm08.local ceph-mon[56824]: pgmap v811: 97 pgs: 97 active+clean; 453 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:47 vm08.local ceph-mon[56824]: pgmap v812: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:47 vm00.local ceph-mon[47668]: pgmap v812: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 
160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:48.298 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:48.299 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:48.326 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:48.327 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:49 vm08.local ceph-mon[56824]: pgmap v813: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:49 vm00.local ceph-mon[47668]: pgmap v813: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:51 vm08.local ceph-mon[56824]: pgmap v814: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:51.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:51 vm00.local ceph-mon[47668]: pgmap v814: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:53.328 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:53.328 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:53.354 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:53.355 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:53 vm08.local ceph-mon[56824]: pgmap v815: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:53 vm00.local ceph-mon[47668]: pgmap v815: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:55 vm08.local ceph-mon[56824]: pgmap v816: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:55 vm00.local ceph-mon[47668]: pgmap v816: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:57.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:57 vm08.local ceph-mon[56824]: pgmap v817: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:57 vm00.local ceph-mon[47668]: pgmap v817: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:30:58.356 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:30:58.357 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:30:58.382 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:30:58.383 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:30:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:30:59 vm08.local 
ceph-mon[56824]: pgmap v818: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:30:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:30:59 vm00.local ceph-mon[47668]: pgmap v818: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:01 vm08.local ceph-mon[56824]: pgmap v819: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:01.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:01 vm00.local ceph-mon[47668]: pgmap v819: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:03 vm08.local ceph-mon[56824]: pgmap v820: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:03.384 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:03.385 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:03.412 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:03.413 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:03 vm00.local ceph-mon[47668]: pgmap v820: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:05 vm08.local ceph-mon[56824]: pgmap v821: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:05 vm00.local ceph-mon[47668]: pgmap v821: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:31:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:31:06.477 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:31:06.477 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:31:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:07 vm00.local ceph-mon[47668]: pgmap v822: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:07.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:07 vm08.local ceph-mon[56824]: pgmap v822: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:08.414 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:08.415 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:08.442 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:08.443 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:09 vm00.local ceph-mon[47668]: pgmap v823: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:09 vm08.local ceph-mon[56824]: pgmap v823: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:11 vm00.local ceph-mon[47668]: pgmap v824: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:11 vm08.local ceph-mon[56824]: pgmap v824: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:13 vm00.local ceph-mon[47668]: pgmap v825: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:13.444 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:13.445 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:13.470 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:13.471 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:13 vm08.local ceph-mon[56824]: pgmap v825: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:15 vm08.local ceph-mon[56824]: pgmap v826: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:15.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:15 vm00.local ceph-mon[47668]: pgmap v826: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:17.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:16 vm08.local ceph-mon[56824]: pgmap v827: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:16 vm00.local ceph-mon[47668]: pgmap v827: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:18.472 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:18.472 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:18.498 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-08T23:31:18.498 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:18 vm08.local ceph-mon[56824]: pgmap v828: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:18 vm00.local ceph-mon[47668]: pgmap v828: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:20 vm08.local ceph-mon[56824]: pgmap v829: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:21.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:20 vm00.local ceph-mon[47668]: pgmap v829: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:22 vm00.local ceph-mon[47668]: pgmap v830: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:31:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:31:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:31:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:31:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:31:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:22 vm08.local ceph-mon[56824]: pgmap v830: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:31:23.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:31:23.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:31:23.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:31:23.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:22 vm08.local ceph-mon[56824]: 
from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:31:23.500 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:23.500 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:23.527 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:23.527 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:24 vm08.local ceph-mon[56824]: pgmap v831: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:24 vm00.local ceph-mon[47668]: pgmap v831: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:27 vm08.local ceph-mon[56824]: pgmap v832: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:27 vm00.local ceph-mon[47668]: pgmap v832: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:28.528 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:28.529 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:28.555 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:28.556 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:28 vm08.local ceph-mon[56824]: pgmap v833: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:28.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:28 vm00.local ceph-mon[47668]: pgmap v833: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:31 vm08.local ceph-mon[56824]: pgmap v834: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:31 vm00.local ceph-mon[47668]: pgmap v834: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:33 vm00.local ceph-mon[47668]: pgmap v835: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:33.557 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:33.558 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:33.585 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:33.585 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:33 vm08.local ceph-mon[56824]: pgmap v835: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:34.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 
08 23:31:34 vm08.local ceph-mon[56824]: pgmap v836: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:34.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:34 vm00.local ceph-mon[47668]: pgmap v836: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:36.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:36 vm08.local ceph-mon[56824]: pgmap v837: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:36.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:36 vm00.local ceph-mon[47668]: pgmap v837: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:38.587 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:38.587 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:38.612 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:38.613 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:38 vm08.local ceph-mon[56824]: pgmap v838: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:39.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:38 vm00.local ceph-mon[47668]: pgmap v838: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:41 vm08.local ceph-mon[56824]: pgmap v839: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:41.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:41 vm00.local ceph-mon[47668]: pgmap v839: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:43 vm08.local ceph-mon[56824]: pgmap v840: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:43 vm00.local ceph-mon[47668]: pgmap v840: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:43.614 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:43.615 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:43.641 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:43.641 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:45 vm00.local ceph-mon[47668]: pgmap v841: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:45 vm08.local ceph-mon[56824]: pgmap v841: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:47 vm00.local ceph-mon[47668]: pgmap v842: 
97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:47 vm08.local ceph-mon[56824]: pgmap v842: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:48.642 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:48.643 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:48.667 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:48.668 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:49 vm00.local ceph-mon[47668]: pgmap v843: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:49 vm08.local ceph-mon[56824]: pgmap v843: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:51 vm00.local ceph-mon[47668]: pgmap v844: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:51 vm08.local ceph-mon[56824]: pgmap v844: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:53 vm00.local ceph-mon[47668]: pgmap v845: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:53 vm08.local ceph-mon[56824]: pgmap v845: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:53.669 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:53.670 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:53.695 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:53.695 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:55 vm00.local ceph-mon[47668]: pgmap v846: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:55 vm08.local ceph-mon[56824]: pgmap v846: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:57 vm08.local ceph-mon[56824]: pgmap v847: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:57 vm00.local ceph-mon[47668]: pgmap v847: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:31:58.696 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:31:58.697 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs 
vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:31:58.725 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:31:58.726 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:31:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:31:59 vm08.local ceph-mon[56824]: pgmap v848: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:31:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:31:59 vm00.local ceph-mon[47668]: pgmap v848: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:01 vm08.local ceph-mon[56824]: pgmap v849: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:01 vm00.local ceph-mon[47668]: pgmap v849: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:03 vm00.local ceph-mon[47668]: pgmap v850: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:03 vm08.local ceph-mon[56824]: pgmap v850: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:03.727 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:03.728 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:03.754 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:03.755 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:04 vm08.local ceph-mon[56824]: pgmap v851: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:04.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:04 vm00.local ceph-mon[47668]: pgmap v851: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:32:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:32:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:32:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:32:07.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:07 vm00.local ceph-mon[47668]: pgmap v852: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:07 vm08.local ceph-mon[56824]: pgmap v852: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:08.757 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:08.758 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:08.784 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:08.784 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:09 vm00.local ceph-mon[47668]: pgmap v853: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:09 vm08.local ceph-mon[56824]: pgmap v853: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:11 vm00.local ceph-mon[47668]: pgmap v854: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:11 vm08.local ceph-mon[56824]: pgmap v854: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:13.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:13 vm00.local ceph-mon[47668]: pgmap v855: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:13 vm08.local ceph-mon[56824]: pgmap v855: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:13.786 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:13.786 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:13.813 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:13.814 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:15 vm00.local ceph-mon[47668]: pgmap v856: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:15 vm08.local ceph-mon[56824]: pgmap v856: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:17.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:17 vm00.local ceph-mon[47668]: pgmap v857: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:17 vm08.local ceph-mon[56824]: pgmap v857: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB 
avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:18.815 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:18.816 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:18.841 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:18.841 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:19.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:19 vm00.local ceph-mon[47668]: pgmap v858: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:19 vm08.local ceph-mon[56824]: pgmap v858: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:21.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:21 vm00.local ceph-mon[47668]: pgmap v859: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:21 vm08.local ceph-mon[56824]: pgmap v859: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:22.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:32:22.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:32:22.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:32:22.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:32:22.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:32:22.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:32:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:23 vm00.local ceph-mon[47668]: pgmap v860: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:32:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:32:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:23 vm08.local ceph-mon[56824]: pgmap v860: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 
GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:23.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:32:23.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:32:23.842 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:23.843 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:23.869 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:23.870 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:25 vm08.local ceph-mon[56824]: pgmap v861: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:25 vm00.local ceph-mon[47668]: pgmap v861: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:27 vm08.local ceph-mon[56824]: pgmap v862: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:27 vm00.local ceph-mon[47668]: pgmap v862: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:28.871 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:28.872 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:28.899 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:28.899 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:29 vm08.local ceph-mon[56824]: pgmap v863: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:29 vm00.local ceph-mon[47668]: pgmap v863: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:31 vm08.local ceph-mon[56824]: pgmap v864: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:31 vm00.local ceph-mon[47668]: pgmap v864: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:33 vm00.local ceph-mon[47668]: pgmap v865: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:33 vm08.local ceph-mon[56824]: pgmap v865: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:33.901 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:33.901 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:33.927 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:33.928 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:34.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:34 vm08.local ceph-mon[56824]: pgmap v866: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:34.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:34 vm00.local ceph-mon[47668]: pgmap v866: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:37.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:37 vm00.local ceph-mon[47668]: pgmap v867: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:37 vm08.local ceph-mon[56824]: pgmap v867: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:38.929 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:38.930 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:38.955 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:38.955 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:39.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:39 vm00.local ceph-mon[47668]: pgmap v868: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:39 vm08.local ceph-mon[56824]: pgmap v868: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:41 vm00.local ceph-mon[47668]: pgmap v869: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:41 vm08.local ceph-mon[56824]: pgmap v869: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:43.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:43 vm00.local ceph-mon[47668]: pgmap v870: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:43 vm08.local ceph-mon[56824]: pgmap v870: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:43.956 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:43.957 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:43.981 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:43.982 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:45.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:45 vm00.local ceph-mon[47668]: pgmap v871: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-08T23:32:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:45 vm08.local ceph-mon[56824]: pgmap v871: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:47 vm08.local ceph-mon[56824]: pgmap v872: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:47 vm00.local ceph-mon[47668]: pgmap v872: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:48.983 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:48.984 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:49.010 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:49.011 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:49 vm08.local ceph-mon[56824]: pgmap v873: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:49 vm00.local ceph-mon[47668]: pgmap v873: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:51 vm08.local ceph-mon[56824]: pgmap v874: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:51 vm00.local ceph-mon[47668]: pgmap v874: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:53.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:53 vm00.local ceph-mon[47668]: pgmap v875: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:53 vm08.local ceph-mon[56824]: pgmap v875: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:54.012 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:54.013 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:54.039 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:54.040 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:32:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:55 vm08.local ceph-mon[56824]: pgmap v876: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:55 vm00.local ceph-mon[47668]: pgmap v876: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:57 vm08.local ceph-mon[56824]: pgmap v877: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:57.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:57 vm00.local ceph-mon[47668]: pgmap v877: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:32:58.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:32:58 vm08.local ceph-mon[56824]: pgmap v878: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:58.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:32:58 vm00.local ceph-mon[47668]: pgmap v878: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:32:59.041 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:32:59.042 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:32:59.107 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:32:59.107 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:01 vm08.local ceph-mon[56824]: pgmap v879: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:01.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:01 vm00.local ceph-mon[47668]: pgmap v879: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:02.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:02 vm08.local ceph-mon[56824]: pgmap v880: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:02.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:02 vm00.local ceph-mon[47668]: pgmap v880: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:04.109 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:04.109 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:04.222 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:04.223 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:04 vm08.local ceph-mon[56824]: pgmap v881: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:04 vm00.local ceph-mon[47668]: pgmap v881: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:33:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:33:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:33:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:33:07.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:07 vm00.local ceph-mon[47668]: pgmap v882: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:07 vm08.local ceph-mon[56824]: pgmap v882: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:09.224 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:09.225 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:09.297 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:09.298 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:09 vm08.local ceph-mon[56824]: pgmap v883: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:09 vm00.local ceph-mon[47668]: pgmap v883: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:11 vm08.local ceph-mon[56824]: pgmap v884: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:11 vm00.local ceph-mon[47668]: pgmap v884: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:13 vm00.local ceph-mon[47668]: pgmap v885: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:13 vm08.local ceph-mon[56824]: pgmap v885: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:14.299 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:14.300 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:14.327 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:14.328 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:15 vm08.local ceph-mon[56824]: pgmap v886: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:15 vm00.local ceph-mon[47668]: pgmap v886: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:17 vm08.local ceph-mon[56824]: 
pgmap v887: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:17 vm00.local ceph-mon[47668]: pgmap v887: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:19.329 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:19.330 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:19.356 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:19.357 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:19 vm08.local ceph-mon[56824]: pgmap v888: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:19.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:19 vm00.local ceph-mon[47668]: pgmap v888: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:21 vm00.local ceph-mon[47668]: pgmap v889: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:21 vm08.local ceph-mon[56824]: pgmap v889: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:23 vm00.local ceph-mon[47668]: pgmap v890: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:33:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:33:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:33:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:33:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:33:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:23 vm08.local ceph-mon[56824]: pgmap v890: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:33:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:23 vm08.local ceph-mon[56824]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:33:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:33:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:33:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:33:24.358 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:24.359 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:24.386 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:24.387 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:25 vm00.local ceph-mon[47668]: pgmap v891: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:25 vm08.local ceph-mon[56824]: pgmap v891: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:27 vm08.local ceph-mon[56824]: pgmap v892: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:27 vm00.local ceph-mon[47668]: pgmap v892: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:29.389 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:29.389 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:29.415 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:29.415 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:29 vm08.local ceph-mon[56824]: pgmap v893: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:29 vm00.local ceph-mon[47668]: pgmap v893: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:31 vm08.local ceph-mon[56824]: pgmap v894: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:31 vm00.local ceph-mon[47668]: pgmap v894: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:33 vm00.local ceph-mon[47668]: pgmap v895: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:33.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:33 vm08.local ceph-mon[56824]: pgmap v895: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:34.417 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:34.417 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:34.442 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:34.442 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:35 vm08.local ceph-mon[56824]: pgmap v896: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:35.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:35 vm00.local ceph-mon[47668]: pgmap v896: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:37 vm08.local ceph-mon[56824]: pgmap v897: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:37 vm00.local ceph-mon[47668]: pgmap v897: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:39.444 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:39.444 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:39.469 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:39.469 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:39 vm08.local ceph-mon[56824]: pgmap v898: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:39 vm00.local ceph-mon[47668]: pgmap v898: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:41 vm08.local ceph-mon[56824]: pgmap v899: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:41 vm00.local ceph-mon[47668]: pgmap v899: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:43.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:43 vm00.local ceph-mon[47668]: pgmap v900: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:43 vm08.local ceph-mon[56824]: pgmap v900: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:44.471 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:44.471 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:44.496 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-08T23:33:44.497 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:45 vm08.local ceph-mon[56824]: pgmap v901: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:45 vm00.local ceph-mon[47668]: pgmap v901: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:46.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:46 vm08.local ceph-mon[56824]: pgmap v902: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:46.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:46 vm00.local ceph-mon[47668]: pgmap v902: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:48 vm08.local ceph-mon[56824]: pgmap v903: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:49.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:48 vm00.local ceph-mon[47668]: pgmap v903: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:49.498 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:49.499 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:49.525 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:49.526 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:51 vm00.local ceph-mon[47668]: pgmap v904: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:51 vm08.local ceph-mon[56824]: pgmap v904: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:53.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:53 vm00.local ceph-mon[47668]: pgmap v905: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:53 vm08.local ceph-mon[56824]: pgmap v905: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:54.527 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:54.528 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:54.552 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:54.553 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:33:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:55 vm08.local ceph-mon[56824]: pgmap v906: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:55 vm00.local ceph-mon[47668]: pgmap v906: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s 
rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:57 vm08.local ceph-mon[56824]: pgmap v907: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:57 vm00.local ceph-mon[47668]: pgmap v907: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:33:59.554 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:33:59.555 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:33:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:33:59 vm08.local ceph-mon[56824]: pgmap v908: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:33:59 vm00.local ceph-mon[47668]: pgmap v908: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:33:59.769 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:33:59.770 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:00.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:00 vm08.local ceph-mon[56824]: pgmap v909: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:00.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:00 vm00.local ceph-mon[47668]: pgmap v909: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:03.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:02 vm00.local ceph-mon[47668]: pgmap v910: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:02 vm08.local ceph-mon[56824]: pgmap v910: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:04.772 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:04.772 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:04.800 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:04.801 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:05 vm08.local ceph-mon[56824]: pgmap v911: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:05 vm00.local ceph-mon[47668]: pgmap v911: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:34:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:34:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:06 vm08.local ceph-mon[56824]: pgmap v912: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:34:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:34:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:06 vm00.local ceph-mon[47668]: pgmap v912: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:09 vm00.local ceph-mon[47668]: pgmap v913: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:09 vm08.local ceph-mon[56824]: pgmap v913: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:09.802 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:09.803 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:09.951 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:09.952 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:10.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:10 vm08.local ceph-mon[56824]: pgmap v914: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:10.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:10 vm00.local ceph-mon[47668]: pgmap v914: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:12 vm00.local ceph-mon[47668]: pgmap v915: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:12 vm08.local ceph-mon[56824]: pgmap v915: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:14.953 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:14.954 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:14.980 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:14.981 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:15.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:14 vm08.local ceph-mon[56824]: pgmap v916: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:15.180 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:14 vm00.local ceph-mon[47668]: pgmap v916: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:16.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:16 vm08.local ceph-mon[56824]: pgmap v917: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:16.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:16 vm00.local ceph-mon[47668]: pgmap v917: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:18 vm08.local ceph-mon[56824]: pgmap v918: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:18 vm00.local ceph-mon[47668]: pgmap v918: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:19.983 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:19.983 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:20.009 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:20.009 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:20 vm08.local ceph-mon[56824]: pgmap v919: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:20 vm00.local ceph-mon[47668]: pgmap v919: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:23.074 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:22 vm00.local ceph-mon[47668]: pgmap v920: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:23.074 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:34:23.074 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:34:23.074 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:22 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:34:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:22 vm08.local ceph-mon[56824]: pgmap v920: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:34:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:34:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:22 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:34:23.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:34:23.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:34:23.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:34:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:34:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:34:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:34:25.011 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:25.011 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:25.038 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:25.038 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:25 vm08.local ceph-mon[56824]: pgmap v921: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:25 vm00.local ceph-mon[47668]: pgmap v921: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:26.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:26 vm08.local ceph-mon[56824]: pgmap v922: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:26.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:26 vm00.local ceph-mon[47668]: pgmap v922: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:28 vm08.local ceph-mon[56824]: pgmap v923: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:28 vm00.local ceph-mon[47668]: pgmap v923: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:30.039 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:30.040 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:30.065 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:30.066 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:31.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:30 vm08.local ceph-mon[56824]: pgmap v924: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:30 vm00.local ceph-mon[47668]: pgmap v924: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:32 vm08.local ceph-mon[56824]: pgmap v925: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:33.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:32 vm00.local ceph-mon[47668]: pgmap v925: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:35.067 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:35.068 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:35.095 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:35.095 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:34 vm08.local ceph-mon[56824]: pgmap v926: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:34 vm00.local ceph-mon[47668]: pgmap v926: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:36.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:36 vm08.local ceph-mon[56824]: pgmap v927: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:36.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:36 vm00.local ceph-mon[47668]: pgmap v927: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:38 vm08.local ceph-mon[56824]: pgmap v928: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:38 vm00.local ceph-mon[47668]: pgmap v928: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:40.096 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:40.097 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:40.124 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:40.124 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:40 vm08.local ceph-mon[56824]: pgmap v929: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:40 vm00.local ceph-mon[47668]: pgmap v929: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:43.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:34:42 vm00.local ceph-mon[47668]: pgmap v930: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:42 vm08.local ceph-mon[56824]: pgmap v930: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:45.126 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:45.126 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:44 vm08.local ceph-mon[56824]: pgmap v931: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:45.151 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:45.151 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:44 vm00.local ceph-mon[47668]: pgmap v931: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:46.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:46 vm08.local ceph-mon[56824]: pgmap v932: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:46.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:46 vm00.local ceph-mon[47668]: pgmap v932: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:48 vm08.local ceph-mon[56824]: pgmap v933: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:48 vm00.local ceph-mon[47668]: pgmap v933: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:50.153 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:50.153 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:50.179 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:50.180 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:50 vm08.local ceph-mon[56824]: pgmap v934: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:50 vm00.local ceph-mon[47668]: pgmap v934: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:52 vm00.local ceph-mon[47668]: pgmap v935: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:52 vm08.local ceph-mon[56824]: pgmap v935: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:54 vm08.local ceph-mon[56824]: pgmap v936: 97 
pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:54 vm00.local ceph-mon[47668]: pgmap v936: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:55.181 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:34:55.182 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:34:55.207 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:34:55.208 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:34:56.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:56 vm08.local ceph-mon[56824]: pgmap v937: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:56.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:56 vm00.local ceph-mon[47668]: pgmap v937: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:34:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:34:58 vm08.local ceph-mon[56824]: pgmap v938: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:34:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:34:58 vm00.local ceph-mon[47668]: pgmap v938: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:00.209 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:00.210 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:00.235 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:00.235 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:00 vm08.local ceph-mon[56824]: pgmap v939: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:00 vm00.local ceph-mon[47668]: pgmap v939: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:02 vm08.local ceph-mon[56824]: pgmap v940: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:03.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:02 vm00.local ceph-mon[47668]: pgmap v940: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:04 vm08.local ceph-mon[56824]: pgmap v941: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:04 vm00.local ceph-mon[47668]: pgmap v941: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:05.236 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:05.237 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake 
/mnt/foo -o sync 2026-03-08T23:35:05.520 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:05.521 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:35:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:35:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:06 vm08.local ceph-mon[56824]: pgmap v942: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:35:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:35:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:06 vm00.local ceph-mon[47668]: pgmap v942: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:08 vm08.local ceph-mon[56824]: pgmap v943: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:08 vm00.local ceph-mon[47668]: pgmap v943: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:10.522 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:10.523 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:10.548 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:10.549 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:11.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:10 vm08.local ceph-mon[56824]: pgmap v944: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:10 vm00.local ceph-mon[47668]: pgmap v944: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:12 vm08.local ceph-mon[56824]: pgmap v945: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:13.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:12 vm00.local ceph-mon[47668]: pgmap v945: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 
B/s wr, 0 op/s 2026-03-08T23:35:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:14 vm08.local ceph-mon[56824]: pgmap v946: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:35:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:14 vm00.local ceph-mon[47668]: pgmap v946: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:35:15.551 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:15.551 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:15.579 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:15.580 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:16.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:16 vm00.local ceph-mon[47668]: pgmap v947: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:16 vm08.local ceph-mon[56824]: pgmap v947: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:18 vm08.local ceph-mon[56824]: pgmap v948: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:35:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:18 vm00.local ceph-mon[47668]: pgmap v948: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:35:20.581 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:20.582 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:20.610 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:20.610 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:20 vm08.local ceph-mon[56824]: pgmap v949: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:35:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:20 vm00.local ceph-mon[47668]: pgmap v949: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:35:23.113 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:22 vm00.local ceph-mon[47668]: pgmap v950: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:23.115 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:22 vm08.local ceph-mon[56824]: pgmap v950: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:23.843 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:35:23.843 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:35:23.843 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:35:23.843 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:23.843 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:23.843 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:23.843 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:23.843 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:35:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:35:24.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:35:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:24.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:35:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:24 vm08.local ceph-mon[56824]: pgmap v951: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:24 vm00.local ceph-mon[47668]: pgmap v951: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:25.611 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:25.612 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:25.639 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:25.640 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:26.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:26 vm08.local ceph-mon[56824]: pgmap v952: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:26.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:26 vm00.local ceph-mon[47668]: pgmap v952: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:28 vm08.local ceph-mon[56824]: pgmap v953: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:28 vm00.local ceph-mon[47668]: pgmap v953: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:30.642 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:30.642 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:30.668 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:30.669 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:30 vm08.local ceph-mon[56824]: pgmap v954: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:30 vm00.local ceph-mon[47668]: pgmap v954: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:33 vm08.local ceph-mon[56824]: pgmap v955: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:33 vm00.local ceph-mon[47668]: pgmap v955: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:35 vm08.local ceph-mon[56824]: pgmap v956: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:35 vm00.local ceph-mon[47668]: pgmap v956: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:35.670 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:35.671 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:35.698 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:35.699 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:36 vm08.local ceph-mon[56824]: pgmap v957: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:36 vm00.local ceph-mon[47668]: pgmap v957: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 
2026-03-08T23:35:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:38 vm08.local ceph-mon[56824]: pgmap v958: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:39.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:38 vm00.local ceph-mon[47668]: pgmap v958: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:40.700 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:40.701 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:40.727 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:40.727 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:40 vm08.local ceph-mon[56824]: pgmap v959: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:41.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:40 vm00.local ceph-mon[47668]: pgmap v959: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:42 vm08.local ceph-mon[56824]: pgmap v960: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:43.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:42 vm00.local ceph-mon[47668]: pgmap v960: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:44 vm08.local ceph-mon[56824]: pgmap v961: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:44 vm00.local ceph-mon[47668]: pgmap v961: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:45.729 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:45.730 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:45.755 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:45.756 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:46 vm08.local ceph-mon[56824]: pgmap v962: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:46 vm00.local ceph-mon[47668]: pgmap v962: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:48 vm08.local ceph-mon[56824]: pgmap v963: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:49.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:48 vm00.local ceph-mon[47668]: pgmap v963: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:50.758 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:50.758 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:50.783 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:50.783 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:50 vm08.local ceph-mon[56824]: pgmap v964: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:50 vm00.local ceph-mon[47668]: pgmap v964: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:52 vm08.local ceph-mon[56824]: pgmap v965: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:53.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:52 vm00.local ceph-mon[47668]: pgmap v965: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:54 vm08.local ceph-mon[56824]: pgmap v966: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:54 vm00.local ceph-mon[47668]: pgmap v966: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:55.785 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:35:55.785 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:35:55.816 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:35:55.817 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:35:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:56 vm08.local ceph-mon[56824]: pgmap v967: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:56 vm00.local ceph-mon[47668]: pgmap v967: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:35:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:35:58 vm00.local ceph-mon[47668]: pgmap v968: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:35:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:35:58 vm08.local ceph-mon[56824]: pgmap v968: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:00.818 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:00.819 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:00.844 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:00.844 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:00 vm00.local ceph-mon[47668]: pgmap v969: 97 pgs: 97 active+clean; 453 KiB data, 66 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:00 vm08.local ceph-mon[56824]: pgmap v969: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:03 vm08.local ceph-mon[56824]: pgmap v970: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:03 vm00.local ceph-mon[47668]: pgmap v970: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:05 vm08.local ceph-mon[56824]: pgmap v971: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:05.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:05 vm00.local ceph-mon[47668]: pgmap v971: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:05.846 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:05.846 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:06.042 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:06.042 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:36:06.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:36:06.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:06 vm08.local ceph-mon[56824]: pgmap v972: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:06.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:36:06.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:36:06.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:06 vm00.local ceph-mon[47668]: pgmap v972: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:08 vm08.local ceph-mon[56824]: pgmap v973: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:08 vm00.local ceph-mon[47668]: 
pgmap v973: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:11.044 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:11.044 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:11.072 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:11.073 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:11 vm08.local ceph-mon[56824]: pgmap v974: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:11 vm00.local ceph-mon[47668]: pgmap v974: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:13 vm08.local ceph-mon[56824]: pgmap v975: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:13 vm00.local ceph-mon[47668]: pgmap v975: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:15 vm00.local ceph-mon[47668]: pgmap v976: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:15 vm08.local ceph-mon[56824]: pgmap v976: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:16.074 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:16.075 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:16.103 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:16.104 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:16 vm08.local ceph-mon[56824]: pgmap v977: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:16.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:16 vm00.local ceph-mon[47668]: pgmap v977: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:18 vm08.local ceph-mon[56824]: pgmap v978: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:18 vm00.local ceph-mon[47668]: pgmap v978: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:21.106 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:21.106 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:20 vm08.local ceph-mon[56824]: pgmap v979: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 
170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:21.135 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:21.135 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:20 vm00.local ceph-mon[47668]: pgmap v979: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:22 vm08.local ceph-mon[56824]: pgmap v980: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:23.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:22 vm00.local ceph-mon[47668]: pgmap v980: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:23.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:36:23.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:36:23.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:23 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:36:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:36:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:36:24.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:23 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:36:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:24 vm08.local ceph-mon[56824]: pgmap v981: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:25.162 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:24 vm00.local ceph-mon[47668]: pgmap v981: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:26.136 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:26.137 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:26.163 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:26.163 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:26.507 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:26 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:36:26.507 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:26 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": 
"config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:36:26.507 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:26 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:36:26.507 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:26 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:36:26.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:26 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:36:26.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:26 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:36:26.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:26 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:36:26.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:26 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:36:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:27 vm08.local ceph-mon[56824]: pgmap v982: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:27.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:27 vm00.local ceph-mon[47668]: pgmap v982: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:28.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:28 vm08.local ceph-mon[56824]: pgmap v983: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:28.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:28 vm00.local ceph-mon[47668]: pgmap v983: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:30 vm08.local ceph-mon[56824]: pgmap v984: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:31.165 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:31.165 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:30 vm00.local ceph-mon[47668]: pgmap v984: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:31.214 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:31.214 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:32 vm08.local ceph-mon[56824]: pgmap v985: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:33.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:32 vm00.local ceph-mon[47668]: pgmap v985: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:34 vm08.local 
ceph-mon[56824]: pgmap v986: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:34 vm00.local ceph-mon[47668]: pgmap v986: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:36.216 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:36.216 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:36.243 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:36.243 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:36 vm08.local ceph-mon[56824]: pgmap v987: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:36 vm00.local ceph-mon[47668]: pgmap v987: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:38 vm08.local ceph-mon[56824]: pgmap v988: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:39.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:38 vm00.local ceph-mon[47668]: pgmap v988: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:40 vm08.local ceph-mon[56824]: pgmap v989: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:40 vm00.local ceph-mon[47668]: pgmap v989: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:41.245 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:41.245 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:41.272 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:41.273 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:42 vm08.local ceph-mon[56824]: pgmap v990: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:43.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:42 vm00.local ceph-mon[47668]: pgmap v990: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:44 vm08.local ceph-mon[56824]: pgmap v991: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:44 vm00.local ceph-mon[47668]: pgmap v991: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:46.274 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:46.274 INFO:teuthology.orchestra.run.vm00.stderr:+ 
mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:46.301 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:46.301 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:46 vm08.local ceph-mon[56824]: pgmap v992: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:46 vm00.local ceph-mon[47668]: pgmap v992: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:48 vm08.local ceph-mon[56824]: pgmap v993: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:48 vm00.local ceph-mon[47668]: pgmap v993: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:50 vm08.local ceph-mon[56824]: pgmap v994: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:50 vm00.local ceph-mon[47668]: pgmap v994: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:51.303 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:51.303 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:51.330 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:51.330 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:52 vm08.local ceph-mon[56824]: pgmap v995: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:53.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:52 vm00.local ceph-mon[47668]: pgmap v995: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:54 vm08.local ceph-mon[56824]: pgmap v996: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:54 vm00.local ceph-mon[47668]: pgmap v996: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:56.331 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:36:56.332 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:36:56.358 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:36:56.358 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:36:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:56 vm08.local ceph-mon[56824]: pgmap v997: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:56.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:56 vm00.local ceph-mon[47668]: pgmap v997: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:36:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:36:58 vm08.local ceph-mon[56824]: pgmap v998: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:36:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:36:58 vm00.local ceph-mon[47668]: pgmap v998: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:00 vm08.local ceph-mon[56824]: pgmap v999: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:00 vm00.local ceph-mon[47668]: pgmap v999: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:01.360 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:01.360 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:01.387 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:01.388 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:02 vm08.local ceph-mon[56824]: pgmap v1000: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:03.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:02 vm00.local ceph-mon[47668]: pgmap v1000: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:04 vm08.local ceph-mon[56824]: pgmap v1001: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:04 vm00.local ceph-mon[47668]: pgmap v1001: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:06.167 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:37:06.167 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:37:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:37:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:37:06.389 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:06.390 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:06.416 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:06.416 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:06 vm00.local ceph-mon[47668]: pgmap v1002: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:06 vm08.local ceph-mon[56824]: pgmap v1002: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:08 vm00.local ceph-mon[47668]: pgmap v1003: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:08 vm08.local ceph-mon[56824]: pgmap v1003: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:10 vm00.local ceph-mon[47668]: pgmap v1004: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:10 vm08.local ceph-mon[56824]: pgmap v1004: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:11.418 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:11.418 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:11.444 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:11.445 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:13.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:12 vm00.local ceph-mon[47668]: pgmap v1005: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:12 vm08.local ceph-mon[56824]: pgmap v1005: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:15.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:14 vm00.local ceph-mon[47668]: pgmap v1006: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:14 vm08.local ceph-mon[56824]: pgmap v1006: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:16.447 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:16.447 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:16.475 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:16.475 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:16.743 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:16 vm08.local ceph-mon[56824]: pgmap v1007: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:16.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:16 vm00.local ceph-mon[47668]: pgmap v1007: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:18 vm00.local ceph-mon[47668]: pgmap v1008: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:18 vm08.local ceph-mon[56824]: pgmap v1008: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:21 vm08.local ceph-mon[56824]: pgmap v1009: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:21.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:21 vm00.local ceph-mon[47668]: pgmap v1009: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:21.477 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:21.477 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:21.504 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:21.504 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:23 vm08.local ceph-mon[56824]: pgmap v1010: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:23 vm00.local ceph-mon[47668]: pgmap v1010: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:25.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:25 vm08.local ceph-mon[56824]: pgmap v1011: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:25.432 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:25 vm00.local ceph-mon[47668]: pgmap v1011: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:26.099 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:26 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:37:26.099 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:26 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:37:26.099 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:26 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:37:26.237 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:26 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:37:26.237 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:26 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:37:26.237 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:26 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:37:26.506 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:26.506 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:26.538 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:26.538 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:27.599 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:27 vm08.local ceph-mon[56824]: pgmap v1012: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:27.599 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:27 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:27.599 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:27 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:27.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:27 vm00.local ceph-mon[47668]: pgmap v1012: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:27.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:27 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:27.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:27 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:37:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:28 vm08.local ceph-mon[56824]: pgmap v1013: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", 
"who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:37:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:37:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:28 vm00.local ceph-mon[47668]: pgmap v1013: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:30 vm08.local ceph-mon[56824]: pgmap v1014: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:30 vm00.local ceph-mon[47668]: pgmap v1014: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:31.539 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:31.540 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:31.568 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:31.569 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:32 vm08.local ceph-mon[56824]: pgmap v1015: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:33.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:32 vm00.local ceph-mon[47668]: pgmap v1015: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:34 vm00.local ceph-mon[47668]: pgmap v1016: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:34 vm08.local ceph-mon[56824]: pgmap v1016: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:36.570 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:36.570 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:36.597 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:36.597 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:36 vm08.local ceph-mon[56824]: pgmap v1017: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:36.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:36 vm00.local ceph-mon[47668]: pgmap v1017: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:39.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:38 vm08.local ceph-mon[56824]: pgmap v1018: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:39.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:37:38 vm00.local ceph-mon[47668]: pgmap v1018: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:40 vm08.local ceph-mon[56824]: pgmap v1019: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:40 vm00.local ceph-mon[47668]: pgmap v1019: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:41.599 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:41.599 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:41.625 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:41.626 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:42 vm08.local ceph-mon[56824]: pgmap v1020: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:43.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:42 vm00.local ceph-mon[47668]: pgmap v1020: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:45.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:44 vm00.local ceph-mon[47668]: pgmap v1021: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:44 vm08.local ceph-mon[56824]: pgmap v1021: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:46.627 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:46.628 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:46.656 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:46.656 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:46 vm08.local ceph-mon[56824]: pgmap v1022: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:46 vm00.local ceph-mon[47668]: pgmap v1022: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:48 vm08.local ceph-mon[56824]: pgmap v1023: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:48 vm00.local ceph-mon[47668]: pgmap v1023: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:50 vm08.local ceph-mon[56824]: pgmap v1024: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:50 vm00.local ceph-mon[47668]: 
pgmap v1024: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:51.657 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:51.658 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:51.683 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:51.684 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:53.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:52 vm00.local ceph-mon[47668]: pgmap v1025: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:52 vm08.local ceph-mon[56824]: pgmap v1025: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:54 vm00.local ceph-mon[47668]: pgmap v1026: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:54 vm08.local ceph-mon[56824]: pgmap v1026: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:56.685 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:37:56.686 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:37:56.710 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:37:56.711 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:37:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:56 vm08.local ceph-mon[56824]: pgmap v1027: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:56 vm00.local ceph-mon[47668]: pgmap v1027: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:37:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:37:58 vm08.local ceph-mon[56824]: pgmap v1028: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:37:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:37:58 vm00.local ceph-mon[47668]: pgmap v1028: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:00 vm08.local ceph-mon[56824]: pgmap v1029: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:00 vm00.local ceph-mon[47668]: pgmap v1029: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:01.712 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:01.713 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:01.738 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:01.739 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 
2026-03-08T23:38:03.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:02 vm00.local ceph-mon[47668]: pgmap v1030: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:02 vm08.local ceph-mon[56824]: pgmap v1030: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:04 vm00.local ceph-mon[47668]: pgmap v1031: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:04 vm08.local ceph-mon[56824]: pgmap v1031: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:38:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:38:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:38:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:38:06.740 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:06.741 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:06.771 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:06.771 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:06 vm00.local ceph-mon[47668]: pgmap v1032: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:06 vm08.local ceph-mon[56824]: pgmap v1032: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:08 vm00.local ceph-mon[47668]: pgmap v1033: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:08 vm08.local ceph-mon[56824]: pgmap v1033: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:10 vm00.local ceph-mon[47668]: pgmap v1034: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB 
used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:10 vm08.local ceph-mon[56824]: pgmap v1034: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:11.773 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:11.773 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:11.799 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:11.800 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:13.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:12 vm00.local ceph-mon[47668]: pgmap v1035: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:12 vm08.local ceph-mon[56824]: pgmap v1035: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:14 vm00.local ceph-mon[47668]: pgmap v1036: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:14 vm08.local ceph-mon[56824]: pgmap v1036: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:16.801 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:16.802 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:16.829 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:16.830 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:16 vm08.local ceph-mon[56824]: pgmap v1037: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:16.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:16 vm00.local ceph-mon[47668]: pgmap v1037: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:18 vm00.local ceph-mon[47668]: pgmap v1038: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:18 vm08.local ceph-mon[56824]: pgmap v1038: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:21.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:20 vm00.local ceph-mon[47668]: pgmap v1039: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:21.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:20 vm08.local ceph-mon[56824]: pgmap v1039: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:21.831 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:21.832 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-08T23:38:21.859 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:21.859 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:23.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:22 vm00.local ceph-mon[47668]: pgmap v1040: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:22 vm08.local ceph-mon[56824]: pgmap v1040: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:25.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:24 vm00.local ceph-mon[47668]: pgmap v1041: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:24 vm08.local ceph-mon[56824]: pgmap v1041: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:26.861 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:26.861 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:26.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:26 vm08.local ceph-mon[56824]: pgmap v1042: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:26.886 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:26.887 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:26.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:26 vm00.local ceph-mon[47668]: pgmap v1042: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:28.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:27 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:38:28.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:27 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:38:28.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:27 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:38:28.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:27 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:38:28.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:27 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:38:28.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:27 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:38:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:28 vm00.local ceph-mon[47668]: pgmap v1043: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB 
used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:38:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:38:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:28 vm08.local ceph-mon[56824]: pgmap v1043: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:38:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:38:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:30 vm08.local ceph-mon[56824]: pgmap v1044: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:30 vm00.local ceph-mon[47668]: pgmap v1044: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:31.888 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:31.889 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:31.916 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:31.916 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:32 vm08.local ceph-mon[56824]: pgmap v1045: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:32 vm00.local ceph-mon[47668]: pgmap v1045: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:34 vm08.local ceph-mon[56824]: pgmap v1046: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:34 vm00.local ceph-mon[47668]: pgmap v1046: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:36 vm08.local ceph-mon[56824]: pgmap v1047: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:36.917 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:36.918 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:36 vm00.local ceph-mon[47668]: pgmap v1047: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:36.975 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:36.975 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:38 vm00.local ceph-mon[47668]: pgmap v1048: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:38 vm08.local ceph-mon[56824]: pgmap v1048: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:40 vm00.local ceph-mon[47668]: pgmap v1049: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:40 vm08.local ceph-mon[56824]: pgmap v1049: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:41.976 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:41.977 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:42.003 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:42.003 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:43.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:42 vm00.local ceph-mon[47668]: pgmap v1050: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:42 vm08.local ceph-mon[56824]: pgmap v1050: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:44 vm00.local ceph-mon[47668]: pgmap v1051: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:44 vm08.local ceph-mon[56824]: pgmap v1051: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:46 vm08.local ceph-mon[56824]: pgmap v1052: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:46 vm00.local ceph-mon[47668]: pgmap v1052: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:47.005 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:47.005 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:47.031 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:47.032 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:48 vm00.local ceph-mon[47668]: pgmap v1053: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:48 vm08.local ceph-mon[56824]: pgmap v1053: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s 
wr, 0 op/s 2026-03-08T23:38:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:50 vm00.local ceph-mon[47668]: pgmap v1054: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:50 vm08.local ceph-mon[56824]: pgmap v1054: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:52.033 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:52.034 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:52.060 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:52.061 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:53.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:52 vm00.local ceph-mon[47668]: pgmap v1055: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:52 vm08.local ceph-mon[56824]: pgmap v1055: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:54 vm00.local ceph-mon[47668]: pgmap v1056: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:54 vm08.local ceph-mon[56824]: pgmap v1056: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:56 vm08.local ceph-mon[56824]: pgmap v1057: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:56 vm00.local ceph-mon[47668]: pgmap v1057: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:38:57.062 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:38:57.063 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:38:57.090 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:38:57.090 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:38:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:38:58 vm00.local ceph-mon[47668]: pgmap v1058: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:38:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:38:58 vm08.local ceph-mon[56824]: pgmap v1058: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:00 vm00.local ceph-mon[47668]: pgmap v1059: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:00 vm08.local ceph-mon[56824]: pgmap v1059: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:02.091 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:02.092 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:02.119 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:02.120 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:03.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:02 vm00.local ceph-mon[47668]: pgmap v1060: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:02 vm08.local ceph-mon[56824]: pgmap v1060: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:04 vm00.local ceph-mon[47668]: pgmap v1061: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:04 vm08.local ceph-mon[56824]: pgmap v1061: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:39:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:39:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:39:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:39:07.121 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:07.122 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:07.148 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:07.148 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:06 vm00.local ceph-mon[47668]: pgmap v1062: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:06 vm08.local ceph-mon[56824]: pgmap v1062: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:08 vm00.local ceph-mon[47668]: pgmap v1063: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:09.377 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:08 vm08.local ceph-mon[56824]: pgmap v1063: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:10 vm00.local ceph-mon[47668]: pgmap v1064: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:10 vm08.local ceph-mon[56824]: pgmap v1064: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:12.150 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:12.150 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:12.176 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:12.176 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:13.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:12 vm00.local ceph-mon[47668]: pgmap v1065: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:12 vm08.local ceph-mon[56824]: pgmap v1065: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:14 vm00.local ceph-mon[47668]: pgmap v1066: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:14 vm08.local ceph-mon[56824]: pgmap v1066: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:16 vm08.local ceph-mon[56824]: pgmap v1067: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:16.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:16 vm00.local ceph-mon[47668]: pgmap v1067: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:17.178 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:17.178 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:17.211 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:17.211 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:18 vm00.local ceph-mon[47668]: pgmap v1068: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:18 vm08.local ceph-mon[56824]: pgmap v1068: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:20 vm00.local ceph-mon[47668]: pgmap v1069: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:21.377 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:20 vm08.local ceph-mon[56824]: pgmap v1069: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:22.212 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:22.213 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:22.238 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:22.239 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:23.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:22 vm00.local ceph-mon[47668]: pgmap v1070: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:23.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:22 vm08.local ceph-mon[56824]: pgmap v1070: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:25.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:24 vm00.local ceph-mon[47668]: pgmap v1071: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:24 vm08.local ceph-mon[56824]: pgmap v1071: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:26.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:26 vm08.local ceph-mon[56824]: pgmap v1072: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:26.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:26 vm00.local ceph-mon[47668]: pgmap v1072: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:27.240 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:27.240 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:27.265 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:27.265 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:28 vm00.local ceph-mon[47668]: pgmap v1073: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:39:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:39:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:39:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:39:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:28 
vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:39:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:28 vm08.local ceph-mon[56824]: pgmap v1073: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:39:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:39:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:39:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:39:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:39:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:30 vm00.local ceph-mon[47668]: pgmap v1074: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:30 vm08.local ceph-mon[56824]: pgmap v1074: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:32.267 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:32.267 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:32.295 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:32.295 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:33.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:32 vm00.local ceph-mon[47668]: pgmap v1075: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:32 vm08.local ceph-mon[56824]: pgmap v1075: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:34 vm00.local ceph-mon[47668]: pgmap v1076: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:34 vm08.local ceph-mon[56824]: pgmap v1076: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:36.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:36 vm08.local ceph-mon[56824]: pgmap v1077: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:36.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:36 vm00.local ceph-mon[47668]: pgmap v1077: 97 pgs: 97 active+clean; 453 
KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:37.297 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:37.298 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:37.324 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:37.324 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:38 vm00.local ceph-mon[47668]: pgmap v1078: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:39.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:38 vm08.local ceph-mon[56824]: pgmap v1078: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:41.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:40 vm00.local ceph-mon[47668]: pgmap v1079: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:41.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:40 vm08.local ceph-mon[56824]: pgmap v1079: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:42.326 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:42.326 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:42.352 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:42.352 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:43.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:42 vm00.local ceph-mon[47668]: pgmap v1080: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:42 vm08.local ceph-mon[56824]: pgmap v1080: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:45.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:44 vm00.local ceph-mon[47668]: pgmap v1081: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:45.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:44 vm08.local ceph-mon[56824]: pgmap v1081: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:46.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:46 vm08.local ceph-mon[56824]: pgmap v1082: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:46.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:46 vm00.local ceph-mon[47668]: pgmap v1082: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:47.354 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:47.355 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:47.383 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:47.383 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:49.180 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:48 vm00.local ceph-mon[47668]: pgmap v1083: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:49.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:48 vm08.local ceph-mon[56824]: pgmap v1083: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:50 vm00.local ceph-mon[47668]: pgmap v1084: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:50 vm08.local ceph-mon[56824]: pgmap v1084: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:52.384 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:52.385 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:52.416 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:52.416 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:53.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:52 vm00.local ceph-mon[47668]: pgmap v1085: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:52 vm08.local ceph-mon[56824]: pgmap v1085: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:55.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:54 vm00.local ceph-mon[47668]: pgmap v1086: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:54 vm08.local ceph-mon[56824]: pgmap v1086: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:56.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:56 vm08.local ceph-mon[56824]: pgmap v1087: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:56 vm00.local ceph-mon[47668]: pgmap v1087: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:39:57.418 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:39:57.418 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:39:57.444 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:39:57.445 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:39:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:39:58 vm00.local ceph-mon[47668]: pgmap v1088: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:39:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:39:58 vm08.local ceph-mon[56824]: pgmap v1088: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:01.179 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:00 vm00.local ceph-mon[47668]: pgmap v1089: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:01.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:00 vm00.local ceph-mon[47668]: overall HEALTH_OK 2026-03-08T23:40:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:00 vm08.local ceph-mon[56824]: pgmap v1089: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:01.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:00 vm08.local ceph-mon[56824]: overall HEALTH_OK 2026-03-08T23:40:02.446 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:02.447 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:02.493 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:02.493 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:03.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:02 vm00.local ceph-mon[47668]: pgmap v1090: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:02 vm08.local ceph-mon[56824]: pgmap v1090: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:04 vm08.local ceph-mon[56824]: pgmap v1091: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:04 vm00.local ceph-mon[47668]: pgmap v1091: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:40:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:40:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:40:06.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:40:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:06 vm08.local ceph-mon[56824]: pgmap v1092: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:06 vm00.local ceph-mon[47668]: pgmap v1092: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB 
used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:07.494 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:07.495 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:07.521 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:07.521 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:08 vm00.local ceph-mon[47668]: pgmap v1093: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:08 vm08.local ceph-mon[56824]: pgmap v1093: 97 pgs: 97 active+clean; 453 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:10 vm08.local ceph-mon[56824]: pgmap v1094: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:11.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:10 vm00.local ceph-mon[47668]: pgmap v1094: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:12.523 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:12.523 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:12.601 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:12.601 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:13.233 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:12 vm00.local ceph-mon[47668]: pgmap v1095: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:12 vm08.local ceph-mon[56824]: pgmap v1095: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:14 vm08.local ceph-mon[56824]: pgmap v1096: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:14 vm00.local ceph-mon[47668]: pgmap v1096: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:16 vm08.local ceph-mon[56824]: pgmap v1097: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:16.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:16 vm00.local ceph-mon[47668]: pgmap v1097: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:17.603 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:17.603 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:17.629 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:17.630 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 
08 23:40:18 vm00.local ceph-mon[47668]: pgmap v1098: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:18 vm08.local ceph-mon[56824]: pgmap v1098: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:20 vm00.local ceph-mon[47668]: pgmap v1099: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:20 vm08.local ceph-mon[56824]: pgmap v1099: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:22.631 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:22.632 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:22.659 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:22.660 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:23.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:22 vm00.local ceph-mon[47668]: pgmap v1100: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:22 vm08.local ceph-mon[56824]: pgmap v1100: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:24 vm00.local ceph-mon[47668]: pgmap v1101: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:24 vm08.local ceph-mon[56824]: pgmap v1101: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:26.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:26 vm08.local ceph-mon[56824]: pgmap v1102: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:26.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:26 vm00.local ceph-mon[47668]: pgmap v1102: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:27.661 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:27.662 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:27.687 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:27.688 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:28 vm00.local ceph-mon[47668]: pgmap v1103: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:40:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:28 vm00.local 
ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:40:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:40:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:40:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:40:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:28 vm08.local ceph-mon[56824]: pgmap v1103: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:40:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:40:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:40:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:40:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:40:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:30 vm00.local ceph-mon[47668]: pgmap v1104: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:30 vm08.local ceph-mon[56824]: pgmap v1104: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:32.689 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:32.690 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:32.725 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:32.726 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:32 vm08.local ceph-mon[56824]: pgmap v1105: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:33.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:32 vm00.local ceph-mon[47668]: pgmap v1105: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:34 vm08.local ceph-mon[56824]: pgmap v1106: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:34 vm00.local ceph-mon[47668]: pgmap v1106: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:36.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:36 vm08.local ceph-mon[56824]: pgmap v1107: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:36.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:36 vm00.local ceph-mon[47668]: pgmap v1107: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:37.727 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:37.728 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:37.762 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:37.763 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:38 vm00.local ceph-mon[47668]: pgmap v1108: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:38 vm08.local ceph-mon[56824]: pgmap v1108: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:41 vm08.local ceph-mon[56824]: pgmap v1109: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:41 vm00.local ceph-mon[47668]: pgmap v1109: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:42.764 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:42.764 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:42.791 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:42.791 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:43 vm08.local ceph-mon[56824]: pgmap v1110: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:43.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:43 vm00.local ceph-mon[47668]: pgmap v1110: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:45 vm08.local ceph-mon[56824]: pgmap v1111: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:45 vm00.local ceph-mon[47668]: pgmap v1111: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:46 vm08.local ceph-mon[56824]: pgmap v1112: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 
2026-03-08T23:40:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:46 vm00.local ceph-mon[47668]: pgmap v1112: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:47.792 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:47.793 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:47.818 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:47.819 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:48 vm00.local ceph-mon[47668]: pgmap v1113: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:48 vm08.local ceph-mon[56824]: pgmap v1113: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:50 vm00.local ceph-mon[47668]: pgmap v1114: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:50 vm08.local ceph-mon[56824]: pgmap v1114: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:52.820 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:52.821 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:52.847 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:40:52.847 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:53.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:52 vm00.local ceph-mon[47668]: pgmap v1115: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:52 vm08.local ceph-mon[56824]: pgmap v1115: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:54 vm00.local ceph-mon[47668]: pgmap v1116: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:54 vm08.local ceph-mon[56824]: pgmap v1116: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:56 vm08.local ceph-mon[56824]: pgmap v1117: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:56 vm00.local ceph-mon[47668]: pgmap v1117: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:40:57.849 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:40:57.849 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:40:57.874 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: 
mount system call failed 2026-03-08T23:40:57.875 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:40:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:40:58 vm00.local ceph-mon[47668]: pgmap v1118: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:40:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:40:58 vm08.local ceph-mon[56824]: pgmap v1118: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:00 vm00.local ceph-mon[47668]: pgmap v1119: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:00 vm08.local ceph-mon[56824]: pgmap v1119: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:02.876 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:02.877 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:02.903 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:02.904 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:03.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:02 vm00.local ceph-mon[47668]: pgmap v1120: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:02 vm08.local ceph-mon[56824]: pgmap v1120: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:04 vm00.local ceph-mon[47668]: pgmap v1121: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:04 vm08.local ceph-mon[56824]: pgmap v1121: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:41:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:41:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:41:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:41:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 
08 23:41:06 vm00.local ceph-mon[47668]: pgmap v1122: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:06 vm08.local ceph-mon[56824]: pgmap v1122: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:07.905 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:07.906 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:07.933 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:07.934 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:08 vm00.local ceph-mon[47668]: pgmap v1123: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:08 vm08.local ceph-mon[56824]: pgmap v1123: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:11.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:11 vm00.local ceph-mon[47668]: pgmap v1124: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:11 vm08.local ceph-mon[56824]: pgmap v1124: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:12.935 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:12.936 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:12.963 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:12.963 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:13 vm08.local ceph-mon[56824]: pgmap v1125: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:13 vm00.local ceph-mon[47668]: pgmap v1125: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:15 vm08.local ceph-mon[56824]: pgmap v1126: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:15 vm00.local ceph-mon[47668]: pgmap v1126: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:17.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:16 vm08.local ceph-mon[56824]: pgmap v1127: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:16 vm00.local ceph-mon[47668]: pgmap v1127: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:17.965 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:17.965 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:18.046 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:18.054 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:18 vm00.local ceph-mon[47668]: pgmap v1128: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:18 vm08.local ceph-mon[56824]: pgmap v1128: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:20 vm08.local ceph-mon[56824]: pgmap v1129: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:20 vm00.local ceph-mon[47668]: pgmap v1129: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:23.049 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:23.049 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:23.079 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:23.079 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:22 vm08.local ceph-mon[56824]: pgmap v1130: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:22 vm00.local ceph-mon[47668]: pgmap v1130: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:24 vm08.local ceph-mon[56824]: pgmap v1131: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:24 vm00.local ceph-mon[47668]: pgmap v1131: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:27.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:26 vm08.local ceph-mon[56824]: pgmap v1132: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:26 vm00.local ceph-mon[47668]: pgmap v1132: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:28.081 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:28.081 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:28.108 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:28.108 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:28 vm00.local ceph-mon[47668]: pgmap v1133: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-08T23:41:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:41:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:41:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:41:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:41:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:41:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:28 vm08.local ceph-mon[56824]: pgmap v1133: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:41:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:41:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:41:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:41:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:41:31.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:30 vm00.local ceph-mon[47668]: pgmap v1134: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:30 vm08.local ceph-mon[56824]: pgmap v1134: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:33.110 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:33.110 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:33.164 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:33.165 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:33.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:32 vm00.local ceph-mon[47668]: pgmap v1135: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:32 
vm08.local ceph-mon[56824]: pgmap v1135: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:34 vm00.local ceph-mon[47668]: pgmap v1136: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:34 vm08.local ceph-mon[56824]: pgmap v1136: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:37.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:36 vm08.local ceph-mon[56824]: pgmap v1137: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:36 vm00.local ceph-mon[47668]: pgmap v1137: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:38.167 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:38.167 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:38.194 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:38.194 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:38 vm00.local ceph-mon[47668]: pgmap v1138: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:38 vm08.local ceph-mon[56824]: pgmap v1138: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:40 vm08.local ceph-mon[56824]: pgmap v1139: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:41.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:40 vm00.local ceph-mon[47668]: pgmap v1139: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:43.196 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:43.196 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:43.221 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:43.222 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:43.232 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:42 vm00.local ceph-mon[47668]: pgmap v1140: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:42 vm08.local ceph-mon[56824]: pgmap v1140: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:44 vm08.local ceph-mon[56824]: pgmap v1141: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:45.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:44 vm00.local ceph-mon[47668]: pgmap v1141: 
97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:47.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:46 vm08.local ceph-mon[56824]: pgmap v1142: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:46 vm00.local ceph-mon[47668]: pgmap v1142: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:48.224 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:48.225 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:48.250 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:48.251 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:48 vm00.local ceph-mon[47668]: pgmap v1143: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:48 vm08.local ceph-mon[56824]: pgmap v1143: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:50 vm00.local ceph-mon[47668]: pgmap v1144: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:50 vm08.local ceph-mon[56824]: pgmap v1144: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:53.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:52 vm00.local ceph-mon[47668]: pgmap v1145: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:53.252 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:53.253 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:53.293 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:53.293 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:53.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:52 vm08.local ceph-mon[56824]: pgmap v1145: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:54 vm08.local ceph-mon[56824]: pgmap v1146: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:54 vm00.local ceph-mon[47668]: pgmap v1146: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:56 vm08.local ceph-mon[56824]: pgmap v1147: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:56 vm00.local ceph-mon[47668]: pgmap v1147: 97 pgs: 97 active+clean; 453 KiB data, 
67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:41:58.294 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:41:58.295 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:41:58.322 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:41:58.323 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:41:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:41:58 vm00.local ceph-mon[47668]: pgmap v1148: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:41:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:41:58 vm08.local ceph-mon[56824]: pgmap v1148: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:01.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:00 vm00.local ceph-mon[47668]: pgmap v1149: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:00 vm08.local ceph-mon[56824]: pgmap v1149: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:03.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:02 vm00.local ceph-mon[47668]: pgmap v1150: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:03.324 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:03.325 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:03.351 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:03.351 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:02 vm08.local ceph-mon[56824]: pgmap v1150: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:04 vm08.local ceph-mon[56824]: pgmap v1151: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:04 vm00.local ceph-mon[47668]: pgmap v1151: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:42:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:42:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 
2026-03-08T23:42:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:42:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:06 vm08.local ceph-mon[56824]: pgmap v1152: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:06 vm00.local ceph-mon[47668]: pgmap v1152: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:08.353 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:08.354 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:08.379 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:08.379 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:09.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:08 vm00.local ceph-mon[47668]: pgmap v1153: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:08 vm08.local ceph-mon[56824]: pgmap v1153: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:10 vm08.local ceph-mon[56824]: pgmap v1154: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:11.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:10 vm00.local ceph-mon[47668]: pgmap v1154: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:13.233 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:12 vm00.local ceph-mon[47668]: pgmap v1155: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:12 vm08.local ceph-mon[56824]: pgmap v1155: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:13.380 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:13.381 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:13.408 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:13.408 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:14 vm08.local ceph-mon[56824]: pgmap v1156: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:14 vm00.local ceph-mon[47668]: pgmap v1156: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:17.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:16 vm08.local ceph-mon[56824]: pgmap v1157: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 
255 B/s wr, 0 op/s 2026-03-08T23:42:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:16 vm00.local ceph-mon[47668]: pgmap v1157: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:18.410 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:18.410 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:18.438 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:18.438 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:18 vm00.local ceph-mon[47668]: pgmap v1158: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:18 vm08.local ceph-mon[56824]: pgmap v1158: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:20 vm00.local ceph-mon[47668]: pgmap v1159: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:20 vm08.local ceph-mon[56824]: pgmap v1159: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:22 vm08.local ceph-mon[56824]: pgmap v1160: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:22 vm00.local ceph-mon[47668]: pgmap v1160: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:23.439 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:23.440 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:23.465 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:23.466 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:24 vm08.local ceph-mon[56824]: pgmap v1161: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:25.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:24 vm00.local ceph-mon[47668]: pgmap v1161: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:27.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:26 vm08.local ceph-mon[56824]: pgmap v1162: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:26 vm00.local ceph-mon[47668]: pgmap v1162: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:28.467 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:28.468 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:28.493 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:28.494 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:28 vm08.local ceph-mon[56824]: pgmap v1163: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:42:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:42:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:28 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:42:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:28 vm00.local ceph-mon[47668]: pgmap v1163: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:42:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:42:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:28 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:42:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:42:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:42:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:42:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:42:31.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:31 vm08.local ceph-mon[56824]: pgmap v1164: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:31.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:31 vm00.local ceph-mon[47668]: pgmap v1164: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:33 vm08.local ceph-mon[56824]: pgmap v1165: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:42:33 vm00.local ceph-mon[47668]: pgmap v1165: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:33.495 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:33.496 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:33.525 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:33.526 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:35 vm08.local ceph-mon[56824]: pgmap v1166: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:35 vm00.local ceph-mon[47668]: pgmap v1166: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:37.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:36 vm08.local ceph-mon[56824]: pgmap v1167: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:37.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:36 vm00.local ceph-mon[47668]: pgmap v1167: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:38.527 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:38.528 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:38.554 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:38.555 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:38 vm00.local ceph-mon[47668]: pgmap v1168: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:39.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:38 vm08.local ceph-mon[56824]: pgmap v1168: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:41.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:41 vm08.local ceph-mon[56824]: pgmap v1169: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:41.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:41 vm00.local ceph-mon[47668]: pgmap v1169: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:43 vm08.local ceph-mon[56824]: pgmap v1170: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:43.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:43 vm00.local ceph-mon[47668]: pgmap v1170: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:43.556 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:43.557 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:43.584 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:43.584 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:45 vm08.local ceph-mon[56824]: pgmap v1171: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:45 vm00.local ceph-mon[47668]: pgmap v1171: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:47.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:46 vm08.local ceph-mon[56824]: pgmap v1172: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:46 vm00.local ceph-mon[47668]: pgmap v1172: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:48.586 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:48.587 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:48.612 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:48.613 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:48 vm00.local ceph-mon[47668]: pgmap v1173: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:48 vm08.local ceph-mon[56824]: pgmap v1173: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:50 vm08.local ceph-mon[56824]: pgmap v1174: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:50 vm00.local ceph-mon[47668]: pgmap v1174: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:52 vm08.local ceph-mon[56824]: pgmap v1175: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:52 vm00.local ceph-mon[47668]: pgmap v1175: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:53.615 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:53.615 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:53.645 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:53.646 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:54 vm08.local ceph-mon[56824]: pgmap v1176: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:54 vm00.local ceph-mon[47668]: pgmap v1176: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s 
wr, 0 op/s 2026-03-08T23:42:57.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:56 vm08.local ceph-mon[56824]: pgmap v1177: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:56 vm00.local ceph-mon[47668]: pgmap v1177: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:42:58.647 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:42:58.648 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:42:58.675 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:42:58.676 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:42:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:42:58 vm00.local ceph-mon[47668]: pgmap v1178: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:42:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:42:58 vm08.local ceph-mon[56824]: pgmap v1178: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:00 vm08.local ceph-mon[56824]: pgmap v1179: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:00 vm00.local ceph-mon[47668]: pgmap v1179: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:02 vm08.local ceph-mon[56824]: pgmap v1180: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:02 vm00.local ceph-mon[47668]: pgmap v1180: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:03.677 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:03.678 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:03.707 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:03.707 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:05.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:04 vm08.local ceph-mon[56824]: pgmap v1181: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:04 vm00.local ceph-mon[47668]: pgmap v1181: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:43:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:43:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:43:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:43:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:06 vm08.local ceph-mon[56824]: pgmap v1182: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:07.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:06 vm00.local ceph-mon[47668]: pgmap v1182: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:08.709 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:08.709 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:08.736 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:08.736 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:08 vm00.local ceph-mon[47668]: pgmap v1183: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:08 vm08.local ceph-mon[56824]: pgmap v1183: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:10 vm08.local ceph-mon[56824]: pgmap v1184: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:11.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:10 vm00.local ceph-mon[47668]: pgmap v1184: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:13.232 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:12 vm00.local ceph-mon[47668]: pgmap v1185: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:12 vm08.local ceph-mon[56824]: pgmap v1185: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:13.737 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:13.738 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:13.764 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:13.765 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:14 vm08.local ceph-mon[56824]: pgmap v1186: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:15.429 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:14 vm00.local ceph-mon[47668]: pgmap v1186: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:17.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:16 vm08.local ceph-mon[56824]: pgmap v1187: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:16 vm00.local ceph-mon[47668]: pgmap v1187: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:18.766 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:18.767 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:18.793 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:18.793 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:18 vm00.local ceph-mon[47668]: pgmap v1188: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:18 vm08.local ceph-mon[56824]: pgmap v1188: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:20 vm08.local ceph-mon[56824]: pgmap v1189: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:20 vm00.local ceph-mon[47668]: pgmap v1189: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:23.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:23 vm08.local ceph-mon[56824]: pgmap v1190: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:23 vm00.local ceph-mon[47668]: pgmap v1190: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:23.795 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:23.795 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:23.822 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:23.823 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:25.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:25 vm08.local ceph-mon[56824]: pgmap v1191: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:25.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:25 vm00.local ceph-mon[47668]: pgmap v1191: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:27.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:26 vm08.local ceph-mon[56824]: pgmap v1192: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:27.179 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:26 vm00.local ceph-mon[47668]: pgmap v1192: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:28.824 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:28.825 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:28.854 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:28.855 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:29.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:28 vm00.local ceph-mon[47668]: pgmap v1193: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:29.242 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:28 vm08.local ceph-mon[56824]: pgmap v1193: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:29 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:43:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:29 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:43:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:29 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:43:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:29 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:43:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:29 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:43:30.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:29 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:43:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:29 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:43:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:29 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:43:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:29 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:43:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:29 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:43:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:31 vm08.local ceph-mon[56824]: pgmap v1194: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:31 vm00.local ceph-mon[47668]: pgmap 
v1194: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:33 vm00.local ceph-mon[47668]: pgmap v1195: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:33 vm08.local ceph-mon[56824]: pgmap v1195: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:33.856 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:33.857 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:33.882 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:33.883 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:35 vm08.local ceph-mon[56824]: pgmap v1196: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:35 vm00.local ceph-mon[47668]: pgmap v1196: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:37.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:36 vm08.local ceph-mon[56824]: pgmap v1197: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:36 vm00.local ceph-mon[47668]: pgmap v1197: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:38.884 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:38.885 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:38.913 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:38.914 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:38 vm00.local ceph-mon[47668]: pgmap v1198: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:38 vm08.local ceph-mon[56824]: pgmap v1198: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:40 vm08.local ceph-mon[56824]: pgmap v1199: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:40 vm00.local ceph-mon[47668]: pgmap v1199: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:43.232 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:42 vm00.local ceph-mon[47668]: pgmap v1200: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:43.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:42 vm08.local ceph-mon[56824]: pgmap v1200: 97 pgs: 97 active+clean; 453 KiB 
data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:43.915 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:43.916 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:43.943 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:43.943 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:45.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:44 vm08.local ceph-mon[56824]: pgmap v1201: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:44 vm00.local ceph-mon[47668]: pgmap v1201: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:47.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:46 vm08.local ceph-mon[56824]: pgmap v1202: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:46 vm00.local ceph-mon[47668]: pgmap v1202: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:48.945 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:48.945 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:49.027 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:49.028 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:49.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:49 vm00.local ceph-mon[47668]: pgmap v1203: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:49.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:49 vm08.local ceph-mon[56824]: pgmap v1203: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:51.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:51 vm00.local ceph-mon[47668]: pgmap v1204: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:51.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:51 vm08.local ceph-mon[56824]: pgmap v1204: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:53.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:53 vm00.local ceph-mon[47668]: pgmap v1205: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:53 vm08.local ceph-mon[56824]: pgmap v1205: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:54.029 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:54.030 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:54.056 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:54.056 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:55.430 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:55 vm00.local ceph-mon[47668]: pgmap v1206: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:55 vm08.local ceph-mon[56824]: pgmap v1206: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:57.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:56 vm08.local ceph-mon[56824]: pgmap v1207: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:56 vm00.local ceph-mon[47668]: pgmap v1207: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:43:59.058 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:43:59.058 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:43:59.084 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:43:59.084 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:43:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:43:58 vm00.local ceph-mon[47668]: pgmap v1208: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:43:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:43:58 vm08.local ceph-mon[56824]: pgmap v1208: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:00 vm08.local ceph-mon[56824]: pgmap v1209: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:00 vm00.local ceph-mon[47668]: pgmap v1209: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:03.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:02 vm08.local ceph-mon[56824]: pgmap v1210: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:02 vm00.local ceph-mon[47668]: pgmap v1210: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:04.086 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:04.086 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:04.112 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:04.113 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:05.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:05 vm08.local ceph-mon[56824]: pgmap v1211: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:05 vm00.local ceph-mon[47668]: pgmap v1211: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:06.378 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:44:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:44:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:44:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:44:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:07 vm08.local ceph-mon[56824]: pgmap v1212: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:07 vm00.local ceph-mon[47668]: pgmap v1212: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:09.115 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:09.115 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:09.142 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:09.142 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:09 vm08.local ceph-mon[56824]: pgmap v1213: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:09 vm00.local ceph-mon[47668]: pgmap v1213: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:11 vm08.local ceph-mon[56824]: pgmap v1214: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:11 vm00.local ceph-mon[47668]: pgmap v1214: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:13 vm08.local ceph-mon[56824]: pgmap v1215: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:13 vm00.local ceph-mon[47668]: pgmap v1215: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:14.144 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:14.144 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-08T23:44:14.171 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:14.171 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:15.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:15 vm08.local ceph-mon[56824]: pgmap v1216: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:15 vm00.local ceph-mon[47668]: pgmap v1216: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:17.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:16 vm08.local ceph-mon[56824]: pgmap v1217: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:16 vm00.local ceph-mon[47668]: pgmap v1217: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:19.172 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:19.173 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:18 vm00.local ceph-mon[47668]: pgmap v1218: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:19.198 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:19.198 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:18 vm08.local ceph-mon[56824]: pgmap v1218: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:20 vm08.local ceph-mon[56824]: pgmap v1219: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:20 vm00.local ceph-mon[47668]: pgmap v1219: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:22 vm08.local ceph-mon[56824]: pgmap v1220: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:22 vm00.local ceph-mon[47668]: pgmap v1220: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:24.200 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:24.200 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:24.226 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:24.226 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:24 vm08.local ceph-mon[56824]: pgmap v1221: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:24 vm00.local 
ceph-mon[47668]: pgmap v1221: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:27.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:27 vm00.local ceph-mon[47668]: pgmap v1222: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:27.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:27 vm08.local ceph-mon[56824]: pgmap v1222: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:29.228 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:29.228 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:29.264 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:29.265 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:29.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:29 vm00.local ceph-mon[47668]: pgmap v1223: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:29.523 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:29 vm08.local ceph-mon[56824]: pgmap v1223: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:44:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:44:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:44:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:44:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:44:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:44:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:44:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:44:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:44:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:30 vm08.local ceph-mon[56824]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:44:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:44:30.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:44:31.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:31 vm08.local ceph-mon[56824]: pgmap v1224: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:31 vm00.local ceph-mon[47668]: pgmap v1224: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:33.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:33 vm00.local ceph-mon[47668]: pgmap v1225: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:33 vm08.local ceph-mon[56824]: pgmap v1225: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:34.266 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:34.267 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:34.293 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:34.293 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:35 vm08.local ceph-mon[56824]: pgmap v1226: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:35 vm00.local ceph-mon[47668]: pgmap v1226: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:37 vm08.local ceph-mon[56824]: pgmap v1227: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:37 vm00.local ceph-mon[47668]: pgmap v1227: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:39.295 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:39.295 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:39.323 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:39.323 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:39 vm08.local ceph-mon[56824]: pgmap v1228: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:39.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:39 vm00.local ceph-mon[47668]: pgmap v1228: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:40.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:40 
vm08.local ceph-mon[56824]: pgmap v1229: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:40.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:40 vm00.local ceph-mon[47668]: pgmap v1229: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:43.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:42 vm00.local ceph-mon[47668]: pgmap v1230: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:43.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:42 vm08.local ceph-mon[56824]: pgmap v1230: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:44.324 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:44.325 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:44.351 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:44.351 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:44 vm08.local ceph-mon[56824]: pgmap v1231: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:44 vm00.local ceph-mon[47668]: pgmap v1231: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:47 vm00.local ceph-mon[47668]: pgmap v1232: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:47 vm08.local ceph-mon[56824]: pgmap v1232: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:49.353 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:49.353 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:49.378 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:49.379 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:49 vm00.local ceph-mon[47668]: pgmap v1233: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:49 vm08.local ceph-mon[56824]: pgmap v1233: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:51 vm08.local ceph-mon[56824]: pgmap v1234: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:51 vm00.local ceph-mon[47668]: pgmap v1234: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:53 vm00.local ceph-mon[47668]: pgmap v1235: 
97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:53 vm08.local ceph-mon[56824]: pgmap v1235: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:54.380 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:54.381 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:54.407 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:54.407 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:55 vm08.local ceph-mon[56824]: pgmap v1236: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:55 vm00.local ceph-mon[47668]: pgmap v1236: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:57 vm08.local ceph-mon[56824]: pgmap v1237: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:57 vm00.local ceph-mon[47668]: pgmap v1237: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:44:59.409 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:44:59.409 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:44:59.436 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:44:59.437 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:44:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:44:59 vm08.local ceph-mon[56824]: pgmap v1238: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:44:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:44:59 vm00.local ceph-mon[47668]: pgmap v1238: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:01 vm08.local ceph-mon[56824]: pgmap v1239: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:01 vm00.local ceph-mon[47668]: pgmap v1239: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:03 vm00.local ceph-mon[47668]: pgmap v1240: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:03.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:03 vm08.local ceph-mon[56824]: pgmap v1240: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:04.438 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:04.439 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs 
vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:04.465 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:04.465 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:05 vm08.local ceph-mon[56824]: pgmap v1241: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:05 vm00.local ceph-mon[47668]: pgmap v1241: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:45:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:45:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:45:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:45:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:07 vm08.local ceph-mon[56824]: pgmap v1242: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:07 vm00.local ceph-mon[47668]: pgmap v1242: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:09.466 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:09.467 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:09.494 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:09.495 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:09 vm08.local ceph-mon[56824]: pgmap v1243: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:09 vm00.local ceph-mon[47668]: pgmap v1243: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:10.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:10 vm08.local ceph-mon[56824]: pgmap v1244: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:10.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:10 vm00.local ceph-mon[47668]: pgmap v1244: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 
GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:12 vm08.local ceph-mon[56824]: pgmap v1245: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:13.382 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:12 vm00.local ceph-mon[47668]: pgmap v1245: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:14.496 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:14.497 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:14.524 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:14.524 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:14 vm08.local ceph-mon[56824]: pgmap v1246: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:14 vm00.local ceph-mon[47668]: pgmap v1246: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:17 vm08.local ceph-mon[56824]: pgmap v1247: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:17 vm00.local ceph-mon[47668]: pgmap v1247: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:19 vm00.local ceph-mon[47668]: pgmap v1248: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:19 vm08.local ceph-mon[56824]: pgmap v1248: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:19.526 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:19.526 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:19.553 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:19.554 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:21 vm08.local ceph-mon[56824]: pgmap v1249: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:21 vm00.local ceph-mon[47668]: pgmap v1249: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:23 vm08.local ceph-mon[56824]: pgmap v1250: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:23.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:23 vm00.local ceph-mon[47668]: pgmap v1250: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 
op/s 2026-03-08T23:45:24.555 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:24.555 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:24.580 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:24.581 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:25 vm08.local ceph-mon[56824]: pgmap v1251: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:25 vm00.local ceph-mon[47668]: pgmap v1251: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:27 vm08.local ceph-mon[56824]: pgmap v1252: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:27 vm00.local ceph-mon[47668]: pgmap v1252: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:29 vm00.local ceph-mon[47668]: pgmap v1253: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:29 vm08.local ceph-mon[56824]: pgmap v1253: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:29.582 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:29.583 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:29.608 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:29.609 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:30.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:45:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:45:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:45:30.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:45:30.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:45:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:45:30.430 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:45:30.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:45:31.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:31 vm08.local ceph-mon[56824]: pgmap v1254: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:31.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:31 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:45:31.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:31 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:45:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:31 vm00.local ceph-mon[47668]: pgmap v1254: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:31.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:31 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:45:31.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:31 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:45:33.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:33 vm00.local ceph-mon[47668]: pgmap v1255: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:33 vm08.local ceph-mon[56824]: pgmap v1255: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:34.610 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:34.610 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:34.636 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:34.636 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:35 vm08.local ceph-mon[56824]: pgmap v1256: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:35 vm00.local ceph-mon[47668]: pgmap v1256: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:37 vm08.local ceph-mon[56824]: pgmap v1257: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:37 vm00.local ceph-mon[47668]: pgmap v1257: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:39 vm08.local ceph-mon[56824]: pgmap v1258: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 
170 B/s wr, 0 op/s 2026-03-08T23:45:39.638 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:39.638 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:39.663 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:39.664 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:39 vm00.local ceph-mon[47668]: pgmap v1258: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:41 vm08.local ceph-mon[56824]: pgmap v1259: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:41 vm00.local ceph-mon[47668]: pgmap v1259: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:43.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:43 vm00.local ceph-mon[47668]: pgmap v1260: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:43 vm08.local ceph-mon[56824]: pgmap v1260: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:44.665 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:44.665 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:44.691 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:44.692 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:45 vm08.local ceph-mon[56824]: pgmap v1261: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:45 vm00.local ceph-mon[47668]: pgmap v1261: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:47.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:47 vm08.local ceph-mon[56824]: pgmap v1262: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:47 vm00.local ceph-mon[47668]: pgmap v1262: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:49 vm00.local ceph-mon[47668]: pgmap v1263: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:49.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:49 vm08.local ceph-mon[56824]: pgmap v1263: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:49.694 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:49.694 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:49.721 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:49.721 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:51 vm08.local ceph-mon[56824]: pgmap v1264: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:51 vm00.local ceph-mon[47668]: pgmap v1264: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:53 vm08.local ceph-mon[56824]: pgmap v1265: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:53.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:53 vm00.local ceph-mon[47668]: pgmap v1265: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:54.723 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:54.723 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:54.753 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:54.753 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:45:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:55 vm08.local ceph-mon[56824]: pgmap v1266: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:55 vm00.local ceph-mon[47668]: pgmap v1266: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:57 vm00.local ceph-mon[47668]: pgmap v1267: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:57 vm08.local ceph-mon[56824]: pgmap v1267: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:45:59.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:45:59 vm00.local ceph-mon[47668]: pgmap v1268: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:45:59 vm08.local ceph-mon[56824]: pgmap v1268: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:45:59.755 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:45:59.755 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:45:59.781 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:45:59.781 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:01 vm00.local ceph-mon[47668]: pgmap v1269: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:01 vm08.local ceph-mon[56824]: pgmap 
v1269: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:03.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:03 vm00.local ceph-mon[47668]: pgmap v1270: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:03 vm08.local ceph-mon[56824]: pgmap v1270: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:04.782 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:04.786 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:04.809 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:04.810 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:05 vm00.local ceph-mon[47668]: pgmap v1271: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:05 vm08.local ceph-mon[56824]: pgmap v1271: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:46:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:46:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:46:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:46:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:07 vm00.local ceph-mon[47668]: pgmap v1272: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:07 vm08.local ceph-mon[56824]: pgmap v1272: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:09 vm08.local ceph-mon[56824]: pgmap v1273: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:09 vm00.local ceph-mon[47668]: pgmap v1273: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:09.812 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:09.812 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:09.839 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:09.840 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:11 vm08.local ceph-mon[56824]: pgmap v1274: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:11 vm00.local ceph-mon[47668]: pgmap v1274: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:13.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:13 vm00.local ceph-mon[47668]: pgmap v1275: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:13.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:13 vm08.local ceph-mon[56824]: pgmap v1275: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:14.841 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:14.842 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:14.868 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:14.869 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:15 vm08.local ceph-mon[56824]: pgmap v1276: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:15 vm00.local ceph-mon[47668]: pgmap v1276: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:17 vm08.local ceph-mon[56824]: pgmap v1277: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:17.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:17 vm00.local ceph-mon[47668]: pgmap v1277: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:19.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:19 vm08.local ceph-mon[56824]: pgmap v1278: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:19.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:19 vm00.local ceph-mon[47668]: pgmap v1278: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:19.870 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:19.870 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:19.897 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:19.898 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:21.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:21 vm08.local ceph-mon[56824]: pgmap v1279: 97 pgs: 97 active+clean; 453 KiB 
data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:21.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:21 vm00.local ceph-mon[47668]: pgmap v1279: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:23 vm00.local ceph-mon[47668]: pgmap v1280: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:23.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:23 vm08.local ceph-mon[56824]: pgmap v1280: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:24.899 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:24.900 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:24.926 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:24.926 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:25 vm08.local ceph-mon[56824]: pgmap v1281: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:25.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:25 vm00.local ceph-mon[47668]: pgmap v1281: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:27 vm08.local ceph-mon[56824]: pgmap v1282: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:27 vm00.local ceph-mon[47668]: pgmap v1282: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:29 vm08.local ceph-mon[56824]: pgmap v1283: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:29 vm00.local ceph-mon[47668]: pgmap v1283: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:29.928 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:29.928 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:29.954 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:29.955 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:46:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:46:30.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:30 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 
cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:46:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:46:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:46:30.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:30 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:46:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:31 vm08.local ceph-mon[56824]: pgmap v1284: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:31.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:31 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:46:31.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:31 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:46:31.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:31 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:46:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:31 vm00.local ceph-mon[47668]: pgmap v1284: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:31.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:31 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:46:31.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:31 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:46:31.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:31 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:46:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:33 vm00.local ceph-mon[47668]: pgmap v1285: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:33 vm08.local ceph-mon[56824]: pgmap v1285: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:34.956 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:34.957 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:34.982 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:34.983 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:35 vm08.local ceph-mon[56824]: pgmap v1286: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 
B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:35 vm00.local ceph-mon[47668]: pgmap v1286: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:37.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:37 vm08.local ceph-mon[56824]: pgmap v1287: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:37 vm00.local ceph-mon[47668]: pgmap v1287: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:39 vm08.local ceph-mon[56824]: pgmap v1288: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:39 vm00.local ceph-mon[47668]: pgmap v1288: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:39.984 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:39.985 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:40.011 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:40.011 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:41 vm08.local ceph-mon[56824]: pgmap v1289: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:41 vm00.local ceph-mon[47668]: pgmap v1289: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:43 vm00.local ceph-mon[47668]: pgmap v1290: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:43 vm08.local ceph-mon[56824]: pgmap v1290: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:45.013 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:45.013 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:45.040 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:45.041 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:45 vm08.local ceph-mon[56824]: pgmap v1291: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:45 vm00.local ceph-mon[47668]: pgmap v1291: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:47 vm08.local ceph-mon[56824]: pgmap v1292: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 
2026-03-08T23:46:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:47 vm00.local ceph-mon[47668]: pgmap v1292: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:49 vm08.local ceph-mon[56824]: pgmap v1293: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:49.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:49 vm00.local ceph-mon[47668]: pgmap v1293: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:50.042 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:50.043 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:50.070 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:50.071 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:50 vm08.local ceph-mon[56824]: pgmap v1294: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:50.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:50 vm00.local ceph-mon[47668]: pgmap v1294: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:53.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:53 vm00.local ceph-mon[47668]: pgmap v1295: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:53 vm08.local ceph-mon[56824]: pgmap v1295: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:55.072 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:46:55.073 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:46:55.099 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:46:55.099 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:46:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:55 vm08.local ceph-mon[56824]: pgmap v1296: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:55 vm00.local ceph-mon[47668]: pgmap v1296: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:57 vm08.local ceph-mon[56824]: pgmap v1297: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:57 vm00.local ceph-mon[47668]: pgmap v1297: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:46:58.840 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:46:58 vm00.local ceph-mon[47668]: pgmap v1298: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:46:58.877 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:46:58 vm08.local ceph-mon[56824]: pgmap v1298: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:00.101 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:00.101 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:00.127 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:00.128 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:00 vm08.local ceph-mon[56824]: pgmap v1299: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:00 vm00.local ceph-mon[47668]: pgmap v1299: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:03.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:03 vm00.local ceph-mon[47668]: pgmap v1300: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:03 vm08.local ceph-mon[56824]: pgmap v1300: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:05.129 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:05.130 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:05.156 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:05.156 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:05 vm08.local ceph-mon[56824]: pgmap v1301: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:05 vm00.local ceph-mon[47668]: pgmap v1301: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:47:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:47:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:47:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:47:07.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:07 vm08.local ceph-mon[56824]: pgmap v1302: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:07 vm00.local ceph-mon[47668]: pgmap v1302: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:09 vm08.local ceph-mon[56824]: pgmap v1303: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:09 vm00.local ceph-mon[47668]: pgmap v1303: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:10.158 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:10.158 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:10.186 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:10.187 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:11 vm08.local ceph-mon[56824]: pgmap v1304: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:11 vm00.local ceph-mon[47668]: pgmap v1304: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:13 vm00.local ceph-mon[47668]: pgmap v1305: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:13 vm08.local ceph-mon[56824]: pgmap v1305: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:15.188 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:15.188 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:15.214 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:15.215 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:15 vm08.local ceph-mon[56824]: pgmap v1306: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:15 vm00.local ceph-mon[47668]: pgmap v1306: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:17 vm08.local ceph-mon[56824]: pgmap v1307: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:17 vm00.local ceph-mon[47668]: pgmap v1307: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:19.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:19 vm08.local ceph-mon[56824]: pgmap v1308: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:19.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:19 vm00.local ceph-mon[47668]: pgmap v1308: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:20.216 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:20.217 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:20.242 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:20.243 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:21 vm08.local ceph-mon[56824]: pgmap v1309: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:21 vm00.local ceph-mon[47668]: pgmap v1309: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:23 vm00.local ceph-mon[47668]: pgmap v1310: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:23 vm08.local ceph-mon[56824]: pgmap v1310: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:25.244 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:25.244 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:25.273 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:25.274 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:25 vm08.local ceph-mon[56824]: pgmap v1311: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:25 vm00.local ceph-mon[47668]: pgmap v1311: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:27 vm08.local ceph-mon[56824]: pgmap v1312: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:27.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:27 vm00.local ceph-mon[47668]: pgmap v1312: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:29 vm08.local ceph-mon[56824]: pgmap v1313: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:29 vm00.local ceph-mon[47668]: pgmap v1313: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:30.275 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:30.276 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:30.302 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:30.303 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:31.362 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:31 vm08.local ceph-mon[56824]: pgmap v1314: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:31.362 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:31 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:47:31.362 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:31 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:47:31.362 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:31 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:47:31.521 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:31 vm00.local ceph-mon[47668]: pgmap v1314: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:31.521 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:31 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:47:31.521 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:31 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:47:31.521 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:31 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:47:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:32 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:47:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:32 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:47:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:32 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:47:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:32 vm08.local ceph-mon[56824]: pgmap v1315: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:32 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:47:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:32 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:47:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:47:32 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:47:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:32 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:47:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:32 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:47:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:32 vm00.local ceph-mon[47668]: pgmap v1315: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:32 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:47:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:32 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:47:35.304 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:35.305 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:35.332 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:35.333 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:34 vm08.local ceph-mon[56824]: pgmap v1316: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:34 vm00.local ceph-mon[47668]: pgmap v1316: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:37 vm08.local ceph-mon[56824]: pgmap v1317: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:37 vm00.local ceph-mon[47668]: pgmap v1317: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:39 vm08.local ceph-mon[56824]: pgmap v1318: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:39 vm00.local ceph-mon[47668]: pgmap v1318: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:40.334 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:40.335 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:40.360 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:40.361 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:41 vm08.local ceph-mon[56824]: pgmap v1319: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:41.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:41 vm00.local ceph-mon[47668]: pgmap v1319: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:42 vm08.local ceph-mon[56824]: pgmap v1320: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:42.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:42 vm00.local ceph-mon[47668]: pgmap v1320: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:45.363 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:45.364 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:45.391 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:45.393 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:45 vm00.local ceph-mon[47668]: pgmap v1321: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:45 vm08.local ceph-mon[56824]: pgmap v1321: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:47 vm08.local ceph-mon[56824]: pgmap v1322: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:47 vm00.local ceph-mon[47668]: pgmap v1322: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:47:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:49 vm08.local ceph-mon[56824]: pgmap v1323: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:49 vm00.local ceph-mon[47668]: pgmap v1323: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:50.395 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:50.395 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:50.425 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:50.426 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:51 vm08.local ceph-mon[56824]: pgmap v1324: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:51 vm00.local ceph-mon[47668]: pgmap v1324: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:53.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:53 vm00.local ceph-mon[47668]: pgmap v1325: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:53.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:53 vm08.local ceph-mon[56824]: pgmap v1325: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:55.427 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:47:55.428 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:47:55.454 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:47:55.454 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:47:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:55 vm08.local ceph-mon[56824]: pgmap v1326: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:47:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:55 vm00.local ceph-mon[47668]: pgmap v1326: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:47:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:57 vm08.local ceph-mon[56824]: pgmap v1327: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:57 vm00.local ceph-mon[47668]: pgmap v1327: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:47:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:47:59 vm08.local ceph-mon[56824]: pgmap v1328: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:47:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:47:59 vm00.local ceph-mon[47668]: pgmap v1328: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:48:00.455 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:00.456 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:00.482 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:00.483 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:00.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:00 vm00.local ceph-mon[47668]: pgmap v1329: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:48:00.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:00 vm08.local ceph-mon[56824]: pgmap v1329: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:48:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:03 vm00.local ceph-mon[47668]: pgmap v1330: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:03 vm08.local ceph-mon[56824]: pgmap v1330: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:05.484 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:05.485 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:05.512 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-08T23:48:05.513 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:05 vm08.local ceph-mon[56824]: pgmap v1331: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:05 vm00.local ceph-mon[47668]: pgmap v1331: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:06.477 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:48:06.477 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:48:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:48:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:48:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:07 vm08.local ceph-mon[56824]: pgmap v1332: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:07 vm00.local ceph-mon[47668]: pgmap v1332: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:09 vm08.local ceph-mon[56824]: pgmap v1333: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:09 vm00.local ceph-mon[47668]: pgmap v1333: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:10.514 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:10.515 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:10.542 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:10.543 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:11.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:11 vm08.local ceph-mon[56824]: pgmap v1334: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:11 vm00.local ceph-mon[47668]: pgmap v1334: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:13 vm00.local 
ceph-mon[47668]: pgmap v1335: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:13 vm08.local ceph-mon[56824]: pgmap v1335: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:15.544 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:15.545 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:15.572 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:15.572 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:15 vm08.local ceph-mon[56824]: pgmap v1336: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:15 vm00.local ceph-mon[47668]: pgmap v1336: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:17 vm08.local ceph-mon[56824]: pgmap v1337: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:17 vm00.local ceph-mon[47668]: pgmap v1337: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:19 vm08.local ceph-mon[56824]: pgmap v1338: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:19.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:19 vm00.local ceph-mon[47668]: pgmap v1338: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:20.574 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:20.574 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:20.609 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:20.610 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:21 vm08.local ceph-mon[56824]: pgmap v1339: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:21 vm00.local ceph-mon[47668]: pgmap v1339: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:22 vm08.local ceph-mon[56824]: pgmap v1340: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:22.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:22 vm00.local ceph-mon[47668]: pgmap v1340: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:25 vm08.local ceph-mon[56824]: pgmap v1341: 97 pgs: 97 
active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:25 vm00.local ceph-mon[47668]: pgmap v1341: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:25.612 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:25.612 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:25.639 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:25.639 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:27 vm08.local ceph-mon[56824]: pgmap v1342: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:27 vm00.local ceph-mon[47668]: pgmap v1342: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:29 vm08.local ceph-mon[56824]: pgmap v1343: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:29 vm00.local ceph-mon[47668]: pgmap v1343: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:30.641 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:30.642 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:30.672 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:30.673 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:31 vm08.local ceph-mon[56824]: pgmap v1344: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:31 vm00.local ceph-mon[47668]: pgmap v1344: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:32.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:32 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:48:32.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:32 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:48:32.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:32 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:48:32.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:32 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:48:32.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:32 vm00.local ceph-mon[47668]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:48:32.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:32 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:48:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:33 vm00.local ceph-mon[47668]: pgmap v1345: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:48:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:48:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:33 vm08.local ceph-mon[56824]: pgmap v1345: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:48:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:48:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:35 vm08.local ceph-mon[56824]: pgmap v1346: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:35.674 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:35.675 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:35.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:35 vm00.local ceph-mon[47668]: pgmap v1346: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:35.702 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:35.703 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:37 vm08.local ceph-mon[56824]: pgmap v1347: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:37 vm00.local ceph-mon[47668]: pgmap v1347: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:39 vm08.local ceph-mon[56824]: pgmap v1348: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:39 vm00.local ceph-mon[47668]: pgmap v1348: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:40.704 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:40.705 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:40.731 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:40.731 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:41 vm08.local ceph-mon[56824]: pgmap v1349: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:41 vm00.local ceph-mon[47668]: pgmap v1349: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:43.716 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:43 vm00.local ceph-mon[47668]: pgmap v1350: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:43 vm08.local ceph-mon[56824]: pgmap v1350: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:44 vm08.local ceph-mon[56824]: pgmap v1351: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:44 vm00.local ceph-mon[47668]: pgmap v1351: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:45.732 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:45.733 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:45.786 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:45.787 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:47 vm08.local ceph-mon[56824]: pgmap v1352: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:47 vm00.local ceph-mon[47668]: pgmap v1352: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:49 vm08.local ceph-mon[56824]: pgmap v1353: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:49 vm00.local ceph-mon[47668]: pgmap v1353: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:50.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:50 vm08.local ceph-mon[56824]: pgmap v1354: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:50.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:50 vm00.local ceph-mon[47668]: pgmap v1354: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:50.788 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:50.789 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:50.818 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:50.819 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:53 vm00.local ceph-mon[47668]: pgmap v1355: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:53 vm08.local ceph-mon[56824]: pgmap v1355: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:54.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:54 vm00.local ceph-mon[47668]: pgmap v1356: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:54 vm08.local ceph-mon[56824]: pgmap v1356: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:55.820 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:48:55.820 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:48:55.847 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:48:55.847 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:48:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:57 vm08.local ceph-mon[56824]: pgmap v1357: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:57 vm00.local ceph-mon[47668]: pgmap v1357: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:48:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:48:59 vm08.local ceph-mon[56824]: pgmap v1358: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:48:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:48:59 vm00.local ceph-mon[47668]: pgmap v1358: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:00.849 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:00.849 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:00.875 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:00.875 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:01 vm08.local ceph-mon[56824]: pgmap v1359: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:01 vm00.local ceph-mon[47668]: pgmap v1359: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:03 vm00.local ceph-mon[47668]: pgmap v1360: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:03 vm08.local ceph-mon[56824]: pgmap 
v1360: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:05 vm08.local ceph-mon[56824]: pgmap v1361: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:05 vm00.local ceph-mon[47668]: pgmap v1361: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:05.876 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:05.877 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:05.902 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:05.903 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:49:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:49:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:49:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:49:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:07 vm08.local ceph-mon[56824]: pgmap v1362: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:07 vm00.local ceph-mon[47668]: pgmap v1362: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:09 vm08.local ceph-mon[56824]: pgmap v1363: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:09 vm00.local ceph-mon[47668]: pgmap v1363: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:10.904 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:10.904 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:10.929 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:10.930 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:11 vm08.local ceph-mon[56824]: pgmap v1364: 97 pgs: 97 active+clean; 
453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:11 vm00.local ceph-mon[47668]: pgmap v1364: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:13 vm00.local ceph-mon[47668]: pgmap v1365: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:13 vm08.local ceph-mon[56824]: pgmap v1365: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:15 vm08.local ceph-mon[56824]: pgmap v1366: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:15 vm00.local ceph-mon[47668]: pgmap v1366: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:15.931 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:15.931 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:15.957 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:15.957 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:17 vm08.local ceph-mon[56824]: pgmap v1367: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:17 vm00.local ceph-mon[47668]: pgmap v1367: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:19 vm08.local ceph-mon[56824]: pgmap v1368: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:19.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:19 vm00.local ceph-mon[47668]: pgmap v1368: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:20.959 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:20.959 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:20.985 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:20.985 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:21 vm08.local ceph-mon[56824]: pgmap v1369: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:21 vm00.local ceph-mon[47668]: pgmap v1369: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:23 vm08.local ceph-mon[56824]: pgmap v1370: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 
GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:23.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:23 vm00.local ceph-mon[47668]: pgmap v1370: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:24.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:24 vm08.local ceph-mon[56824]: pgmap v1371: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:24.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:24 vm00.local ceph-mon[47668]: pgmap v1371: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:25.986 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:25.987 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:26.013 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:26.014 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:27 vm08.local ceph-mon[56824]: pgmap v1372: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:27 vm00.local ceph-mon[47668]: pgmap v1372: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:29 vm08.local ceph-mon[56824]: pgmap v1373: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:29 vm00.local ceph-mon[47668]: pgmap v1373: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:31.015 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:31.016 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:31.041 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:31.041 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:31 vm08.local ceph-mon[56824]: pgmap v1374: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:31 vm00.local ceph-mon[47668]: pgmap v1374: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:33 vm00.local ceph-mon[47668]: pgmap v1375: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:49:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-08T23:49:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:49:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:49:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:49:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:33 vm08.local ceph-mon[56824]: pgmap v1375: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:49:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:49:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:49:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:49:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:49:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:35 vm08.local ceph-mon[56824]: pgmap v1376: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:35 vm00.local ceph-mon[47668]: pgmap v1376: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:36.043 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:36.043 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:36.068 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:36.069 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:37 vm08.local ceph-mon[56824]: pgmap v1377: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:37 vm00.local ceph-mon[47668]: pgmap v1377: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:39 vm08.local ceph-mon[56824]: pgmap v1378: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:49:39 vm00.local ceph-mon[47668]: pgmap v1378: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:41.070 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:41.070 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:41.099 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:41.099 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:41 vm08.local ceph-mon[56824]: pgmap v1379: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:41 vm00.local ceph-mon[47668]: pgmap v1379: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:43 vm08.local ceph-mon[56824]: pgmap v1380: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:43.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:43 vm00.local ceph-mon[47668]: pgmap v1380: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:45 vm08.local ceph-mon[56824]: pgmap v1381: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:45 vm00.local ceph-mon[47668]: pgmap v1381: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:46.100 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:46.101 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:46.126 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:46.127 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:47 vm08.local ceph-mon[56824]: pgmap v1382: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:47 vm00.local ceph-mon[47668]: pgmap v1382: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:49 vm08.local ceph-mon[56824]: pgmap v1383: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:49 vm00.local ceph-mon[47668]: pgmap v1383: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:50 vm00.local ceph-mon[47668]: pgmap v1384: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:50 vm08.local ceph-mon[56824]: 
pgmap v1384: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:51.128 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:51.129 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:51.155 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:51.156 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:53 vm00.local ceph-mon[47668]: pgmap v1385: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:53 vm08.local ceph-mon[56824]: pgmap v1385: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:55 vm08.local ceph-mon[56824]: pgmap v1386: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:55 vm00.local ceph-mon[47668]: pgmap v1386: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:56.157 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:49:56.158 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:49:56.186 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:49:56.187 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:49:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:57 vm08.local ceph-mon[56824]: pgmap v1387: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:57 vm00.local ceph-mon[47668]: pgmap v1387: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:49:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:49:59 vm08.local ceph-mon[56824]: pgmap v1388: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:49:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:49:59 vm00.local ceph-mon[47668]: pgmap v1388: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:00.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:00 vm08.local ceph-mon[56824]: overall HEALTH_OK 2026-03-08T23:50:00.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:00 vm00.local ceph-mon[47668]: overall HEALTH_OK 2026-03-08T23:50:01.188 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:01.189 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:01.217 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:01.218 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:01 vm08.local ceph-mon[56824]: pgmap v1389: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 
B/s wr, 0 op/s 2026-03-08T23:50:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:01 vm00.local ceph-mon[47668]: pgmap v1389: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:03 vm00.local ceph-mon[47668]: pgmap v1390: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:03 vm08.local ceph-mon[56824]: pgmap v1390: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:05 vm08.local ceph-mon[56824]: pgmap v1391: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:05.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:05 vm00.local ceph-mon[47668]: pgmap v1391: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:06.219 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:06.220 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:06.245 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:06.245 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:50:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:50:06.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:50:06.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:50:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:07 vm08.local ceph-mon[56824]: pgmap v1392: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:07 vm00.local ceph-mon[47668]: pgmap v1392: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:09 vm08.local ceph-mon[56824]: pgmap v1393: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:09 vm00.local ceph-mon[47668]: pgmap v1393: 97 pgs: 97 active+clean; 453 KiB 
data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:11.246 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:11.247 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:11.274 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:11.274 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:11 vm08.local ceph-mon[56824]: pgmap v1394: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:11 vm00.local ceph-mon[47668]: pgmap v1394: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:13 vm08.local ceph-mon[56824]: pgmap v1395: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:13.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:13 vm00.local ceph-mon[47668]: pgmap v1395: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:14.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:14 vm00.local ceph-mon[47668]: pgmap v1396: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:14.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:14 vm08.local ceph-mon[56824]: pgmap v1396: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:16.276 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:16.276 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:16.310 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:16.311 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:17 vm08.local ceph-mon[56824]: pgmap v1397: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:17 vm00.local ceph-mon[47668]: pgmap v1397: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:19 vm08.local ceph-mon[56824]: pgmap v1398: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:19.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:19 vm00.local ceph-mon[47668]: pgmap v1398: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:21.312 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:21.313 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:21.340 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:21.340 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:21.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:21 vm08.local ceph-mon[56824]: pgmap v1399: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:21 vm00.local ceph-mon[47668]: pgmap v1399: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:23 vm00.local ceph-mon[47668]: pgmap v1400: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:23 vm08.local ceph-mon[56824]: pgmap v1400: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:25 vm08.local ceph-mon[56824]: pgmap v1401: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:25.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:25 vm00.local ceph-mon[47668]: pgmap v1401: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:26.342 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:26.342 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:26.372 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:26.372 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:27 vm08.local ceph-mon[56824]: pgmap v1402: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:27.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:27 vm00.local ceph-mon[47668]: pgmap v1402: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:29 vm08.local ceph-mon[56824]: pgmap v1403: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:29.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:29 vm00.local ceph-mon[47668]: pgmap v1403: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:31.374 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:31.375 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:31.400 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:31.401 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:31 vm08.local ceph-mon[56824]: pgmap v1404: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:31 vm00.local ceph-mon[47668]: pgmap v1404: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:33.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:33 vm08.local ceph-mon[56824]: pgmap v1405: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:50:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:50:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:50:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:50:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:50:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:33 vm00.local ceph-mon[47668]: pgmap v1405: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:50:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:50:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:50:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:50:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:50:34.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:34 vm00.local ceph-mon[47668]: pgmap v1406: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:34.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:34 vm08.local ceph-mon[56824]: pgmap v1406: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:36.403 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:36.403 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:36.430 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:36.431 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:37 vm08.local ceph-mon[56824]: pgmap 
v1407: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:37.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:37 vm00.local ceph-mon[47668]: pgmap v1407: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:39 vm08.local ceph-mon[56824]: pgmap v1408: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:39.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:39 vm00.local ceph-mon[47668]: pgmap v1408: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:41.432 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:41.433 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:41.459 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:41.459 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:41 vm08.local ceph-mon[56824]: pgmap v1409: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:41.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:41 vm00.local ceph-mon[47668]: pgmap v1409: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:43.603 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:43 vm00.local ceph-mon[47668]: pgmap v1410: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:43 vm08.local ceph-mon[56824]: pgmap v1410: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:45 vm08.local ceph-mon[56824]: pgmap v1411: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:45 vm00.local ceph-mon[47668]: pgmap v1411: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:46.461 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:46.462 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:46.489 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:46.489 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:47 vm08.local ceph-mon[56824]: pgmap v1412: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:47 vm00.local ceph-mon[47668]: pgmap v1412: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:49 vm08.local ceph-mon[56824]: pgmap v1413: 97 pgs: 97 active+clean; 453 KiB 
data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:49 vm00.local ceph-mon[47668]: pgmap v1413: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:50 vm08.local ceph-mon[56824]: pgmap v1414: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:50 vm00.local ceph-mon[47668]: pgmap v1414: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:51.491 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:51.491 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:51.560 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:51.561 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:53 vm00.local ceph-mon[47668]: pgmap v1415: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:53.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:53 vm08.local ceph-mon[56824]: pgmap v1415: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:55 vm08.local ceph-mon[56824]: pgmap v1416: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:55 vm00.local ceph-mon[47668]: pgmap v1416: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:56.562 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:50:56.563 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:50:56.589 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:50:56.590 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:50:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:57 vm08.local ceph-mon[56824]: pgmap v1417: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:57 vm00.local ceph-mon[47668]: pgmap v1417: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:50:59.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:50:59 vm00.local ceph-mon[47668]: pgmap v1418: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:50:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:50:59 vm08.local ceph-mon[56824]: pgmap v1418: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:00.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:00 vm00.local ceph-mon[47668]: pgmap v1419: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:00.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:00 vm08.local ceph-mon[56824]: pgmap v1419: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:01.591 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:01.592 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:01.617 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:01.617 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:03 vm00.local ceph-mon[47668]: pgmap v1420: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:03 vm08.local ceph-mon[56824]: pgmap v1420: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:05 vm08.local ceph-mon[56824]: pgmap v1421: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:05 vm00.local ceph-mon[47668]: pgmap v1421: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:06.619 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:06.619 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:51:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:51:06.647 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:06.647 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:51:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:51:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:07 vm08.local ceph-mon[56824]: pgmap v1422: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:07 vm00.local ceph-mon[47668]: pgmap v1422: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 
0 op/s 2026-03-08T23:51:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:09 vm08.local ceph-mon[56824]: pgmap v1423: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:09 vm00.local ceph-mon[47668]: pgmap v1423: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:11 vm08.local ceph-mon[56824]: pgmap v1424: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:11.649 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:11.649 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:11.677 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:11.678 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:11 vm00.local ceph-mon[47668]: pgmap v1424: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:13 vm08.local ceph-mon[56824]: pgmap v1425: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:13.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:13 vm00.local ceph-mon[47668]: pgmap v1425: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:15 vm08.local ceph-mon[56824]: pgmap v1426: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:15 vm00.local ceph-mon[47668]: pgmap v1426: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:16.679 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:16.680 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:16.706 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:16.706 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:17 vm08.local ceph-mon[56824]: pgmap v1427: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:17 vm00.local ceph-mon[47668]: pgmap v1427: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:19 vm08.local ceph-mon[56824]: pgmap v1428: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:19.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:19 vm00.local ceph-mon[47668]: pgmap v1428: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:21.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:21 vm08.local ceph-mon[56824]: pgmap v1429: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:21 vm00.local ceph-mon[47668]: pgmap v1429: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:21.708 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:21.708 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:21.733 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:21.734 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:23 vm08.local ceph-mon[56824]: pgmap v1430: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:23.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:23 vm00.local ceph-mon[47668]: pgmap v1430: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:25 vm08.local ceph-mon[56824]: pgmap v1431: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:25.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:25 vm00.local ceph-mon[47668]: pgmap v1431: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:26.735 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:26.736 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:26.762 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:26.763 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:27 vm08.local ceph-mon[56824]: pgmap v1432: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:27 vm00.local ceph-mon[47668]: pgmap v1432: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:29 vm08.local ceph-mon[56824]: pgmap v1433: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:29 vm00.local ceph-mon[47668]: pgmap v1433: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:31 vm08.local ceph-mon[56824]: pgmap v1434: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:31 vm00.local ceph-mon[47668]: pgmap v1434: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:31.764 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:31.765 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:31.790 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:31.791 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:32.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:32 vm00.local ceph-mon[47668]: pgmap v1435: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:32 vm08.local ceph-mon[56824]: pgmap v1435: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:33.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:51:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:51:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:51:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:51:33.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:51:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:51:33.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:51:33.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:51:33.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:51:33.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:51:34.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:34 vm00.local ceph-mon[47668]: pgmap v1436: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:34.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:34 vm08.local ceph-mon[56824]: pgmap v1436: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:36.792 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:36.793 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:36.819 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:36.819 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:37 vm08.local ceph-mon[56824]: pgmap v1437: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:37 vm00.local ceph-mon[47668]: pgmap v1437: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:39 vm08.local ceph-mon[56824]: pgmap v1438: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:39 vm00.local ceph-mon[47668]: pgmap v1438: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:41 vm08.local ceph-mon[56824]: pgmap v1439: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:41 vm00.local ceph-mon[47668]: pgmap v1439: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:41.821 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:41.822 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:41.851 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:41.851 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:43 vm08.local ceph-mon[56824]: pgmap v1440: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:43.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:43 vm00.local ceph-mon[47668]: pgmap v1440: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:44 vm00.local ceph-mon[47668]: pgmap v1441: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:44.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:44 vm08.local ceph-mon[56824]: pgmap v1441: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:46.853 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:46.853 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:46.881 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:46.882 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:47 vm08.local ceph-mon[56824]: pgmap v1442: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 
2026-03-08T23:51:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:47 vm00.local ceph-mon[47668]: pgmap v1442: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:49 vm08.local ceph-mon[56824]: pgmap v1443: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:49 vm00.local ceph-mon[47668]: pgmap v1443: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:51.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:51 vm08.local ceph-mon[56824]: pgmap v1444: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:51 vm00.local ceph-mon[47668]: pgmap v1444: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:51.883 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:51.884 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:51.910 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:51.911 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:53 vm08.local ceph-mon[56824]: pgmap v1445: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:53 vm00.local ceph-mon[47668]: pgmap v1445: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:54.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:54 vm00.local ceph-mon[47668]: pgmap v1446: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:54 vm08.local ceph-mon[56824]: pgmap v1446: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:56.912 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:51:56.913 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:51:56.940 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:51:56.941 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:51:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:57 vm08.local ceph-mon[56824]: pgmap v1447: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:57.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:57 vm00.local ceph-mon[47668]: pgmap v1447: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:51:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:51:59 vm08.local ceph-mon[56824]: pgmap v1448: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:51:59.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:51:59 vm00.local ceph-mon[47668]: pgmap v1448: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:01 vm08.local ceph-mon[56824]: pgmap v1449: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:01 vm00.local ceph-mon[47668]: pgmap v1449: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:01.942 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:01.943 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:01.968 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:01.969 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:03 vm08.local ceph-mon[56824]: pgmap v1450: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:03.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:03 vm00.local ceph-mon[47668]: pgmap v1450: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:04.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:04 vm00.local ceph-mon[47668]: pgmap v1451: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:04 vm08.local ceph-mon[56824]: pgmap v1451: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:52:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:52:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:52:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:52:06.971 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:06.971 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:06.998 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:06.999 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:07.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:07 vm08.local ceph-mon[56824]: pgmap v1452: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:07 vm00.local ceph-mon[47668]: pgmap v1452: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:09 vm08.local ceph-mon[56824]: pgmap v1453: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:09 vm00.local ceph-mon[47668]: pgmap v1453: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:11 vm08.local ceph-mon[56824]: pgmap v1454: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:11 vm00.local ceph-mon[47668]: pgmap v1454: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:12.000 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:12.000 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:12.027 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:12.027 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:13 vm08.local ceph-mon[56824]: pgmap v1455: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:13.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:13 vm00.local ceph-mon[47668]: pgmap v1455: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:14.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:14 vm08.local ceph-mon[56824]: pgmap v1456: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:14.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:14 vm00.local ceph-mon[47668]: pgmap v1456: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:17.029 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:17.029 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:17.059 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:17.060 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:17 vm08.local ceph-mon[56824]: pgmap v1457: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:17 vm00.local ceph-mon[47668]: pgmap v1457: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:19.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:19 vm08.local ceph-mon[56824]: pgmap v1458: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:19.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:19 vm00.local ceph-mon[47668]: pgmap v1458: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:21 vm00.local ceph-mon[47668]: pgmap v1459: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:21 vm08.local ceph-mon[56824]: pgmap v1459: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:22.061 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:22.062 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:22.086 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:22.087 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:22.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:22 vm00.local ceph-mon[47668]: pgmap v1460: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:22 vm08.local ceph-mon[56824]: pgmap v1460: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:25 vm08.local ceph-mon[56824]: pgmap v1461: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:25.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:25 vm00.local ceph-mon[47668]: pgmap v1461: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:27.089 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:27.089 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:27.117 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:27.117 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:27 vm08.local ceph-mon[56824]: pgmap v1462: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:27 vm00.local ceph-mon[47668]: pgmap v1462: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:29 vm00.local ceph-mon[47668]: pgmap v1463: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:29 vm08.local ceph-mon[56824]: pgmap v1463: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:31.377 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:31 vm08.local ceph-mon[56824]: pgmap v1464: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:31 vm00.local ceph-mon[47668]: pgmap v1464: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:32.119 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:32.119 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:32.145 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:32.146 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:32.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:32 vm00.local ceph-mon[47668]: pgmap v1465: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:32 vm08.local ceph-mon[56824]: pgmap v1465: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:33.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:52:33.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:52:33.387 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:52:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:52:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:52:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:52:34.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:34 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:52:34.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:34 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:52:34.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:34 vm08.local ceph-mon[56824]: pgmap v1466: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:34.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:34 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:52:34.930 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:34 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:52:34.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:34 vm00.local ceph-mon[47668]: pgmap v1466: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:37.147 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:37.147 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:37.192 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:37.192 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:37 vm08.local ceph-mon[56824]: pgmap v1467: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:37 vm00.local ceph-mon[47668]: pgmap v1467: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:39 vm00.local ceph-mon[47668]: pgmap v1468: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:39 vm08.local ceph-mon[56824]: pgmap v1468: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:41 vm08.local ceph-mon[56824]: pgmap v1469: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:41 vm00.local ceph-mon[47668]: pgmap v1469: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:42.194 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:42.194 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:42.220 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:42.220 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:42.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:42 vm00.local ceph-mon[47668]: pgmap v1470: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:42 vm08.local ceph-mon[56824]: pgmap v1470: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:45 vm08.local ceph-mon[56824]: pgmap v1471: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:45 vm00.local ceph-mon[47668]: pgmap v1471: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:47.221 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:47.222 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:47.247 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:47.247 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:47 vm08.local ceph-mon[56824]: pgmap v1472: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:47 vm00.local ceph-mon[47668]: pgmap v1472: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:49 vm00.local ceph-mon[47668]: pgmap v1473: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:49 vm08.local ceph-mon[56824]: pgmap v1473: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:51 vm08.local ceph-mon[56824]: pgmap v1474: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:51 vm00.local ceph-mon[47668]: pgmap v1474: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:52.248 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:52.249 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:52.274 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:52.275 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:52.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:52 vm00.local ceph-mon[47668]: pgmap v1475: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:52.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:52 vm08.local ceph-mon[56824]: pgmap v1475: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:55 vm08.local ceph-mon[56824]: pgmap v1476: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:55.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:55 vm00.local ceph-mon[47668]: pgmap v1476: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:57.276 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:52:57.276 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:52:57.302 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:52:57.303 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:52:57.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:57 vm08.local ceph-mon[56824]: pgmap v1477: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 
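The repeating stderr trace above ("++ hostname", "+ mount -t nfs vm00.local:/fake /mnt/foo -o sync", "mount.nfs: mount system call failed", "+ sleep 5") is the xtrace of a mount retry loop that keeps polling the NFS export roughly every 5 seconds while both mons continue to report all 97 PGs active+clean. A minimal sketch of a loop that would produce this trace, assuming bash with xtrace enabled and a fixed 5-second backoff (a reconstruction from the trace, not the exact task script):

    set -x
    # keep retrying until the export at <hostname>:/fake becomes mountable
    while ! mount -t nfs "$(hostname):/fake" /mnt/foo -o sync; do
        sleep 5
    done

Each failed iteration emits one "mount system call failed" line and then sleeps, which matches the ~5-second spacing between successive mount attempts in the timestamps above.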
2026-03-08T23:52:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:57 vm00.local ceph-mon[47668]: pgmap v1477: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:52:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:52:59 vm00.local ceph-mon[47668]: pgmap v1478: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:52:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:52:59 vm08.local ceph-mon[56824]: pgmap v1478: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:01 vm08.local ceph-mon[56824]: pgmap v1479: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:01 vm00.local ceph-mon[47668]: pgmap v1479: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:02.304 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:02.305 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:02.331 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:02.331 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:02.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:02 vm00.local ceph-mon[47668]: pgmap v1480: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:02.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:02 vm08.local ceph-mon[56824]: pgmap v1480: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:05.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:05 vm00.local ceph-mon[47668]: pgmap v1481: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:05 vm08.local ceph-mon[56824]: pgmap v1481: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:53:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:53:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:53:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:53:07.332 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:07.333 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:07.359 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:07.360 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:07 vm00.local ceph-mon[47668]: pgmap v1482: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:07 vm08.local ceph-mon[56824]: pgmap v1482: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:08.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:08 vm00.local ceph-mon[47668]: pgmap v1483: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:08.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:08 vm08.local ceph-mon[56824]: pgmap v1483: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:11.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:11 vm08.local ceph-mon[56824]: pgmap v1484: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:11 vm00.local ceph-mon[47668]: pgmap v1484: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:12.362 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:12.362 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:12.389 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:12.390 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:12.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:12 vm00.local ceph-mon[47668]: pgmap v1485: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:12.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:12 vm08.local ceph-mon[56824]: pgmap v1485: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:15 vm08.local ceph-mon[56824]: pgmap v1486: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:15 vm00.local ceph-mon[47668]: pgmap v1486: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:17.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:17 vm08.local ceph-mon[56824]: pgmap v1487: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:17.391 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:17.392 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake 
/mnt/foo -o sync 2026-03-08T23:53:17.417 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:17.418 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:17 vm00.local ceph-mon[47668]: pgmap v1487: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:19 vm00.local ceph-mon[47668]: pgmap v1488: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:19 vm08.local ceph-mon[56824]: pgmap v1488: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:21 vm08.local ceph-mon[56824]: pgmap v1489: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:21 vm00.local ceph-mon[47668]: pgmap v1489: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:22.419 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:22.419 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:22.446 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:22.447 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:22.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:22 vm00.local ceph-mon[47668]: pgmap v1490: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:22 vm08.local ceph-mon[56824]: pgmap v1490: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:25 vm08.local ceph-mon[56824]: pgmap v1491: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:25 vm00.local ceph-mon[47668]: pgmap v1491: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:27 vm08.local ceph-mon[56824]: pgmap v1492: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:27 vm00.local ceph-mon[47668]: pgmap v1492: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:27.448 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:27.449 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:27.474 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:27.474 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 
23:53:29 vm00.local ceph-mon[47668]: pgmap v1493: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:29 vm08.local ceph-mon[56824]: pgmap v1493: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:31 vm08.local ceph-mon[56824]: pgmap v1494: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:31 vm00.local ceph-mon[47668]: pgmap v1494: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:32.476 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:32.478 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:32.503 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:32.503 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:32.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:32 vm00.local ceph-mon[47668]: pgmap v1495: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:32 vm08.local ceph-mon[56824]: pgmap v1495: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:53:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:53:33.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:53:33.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:53:33.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:53:33.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:53:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:34 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:53:35.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:34 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:53:35.128 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:34 vm08.local ceph-mon[56824]: pgmap v1496: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:34 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:53:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:34 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:53:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:34 vm00.local ceph-mon[47668]: pgmap v1496: 97 pgs: 97 active+clean; 453 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:37.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:37 vm00.local ceph-mon[47668]: pgmap v1497: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:37.505 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:37.506 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:37 vm08.local ceph-mon[56824]: pgmap v1497: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:37.680 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:37.717 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:39 vm08.local ceph-mon[56824]: pgmap v1498: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:39 vm00.local ceph-mon[47668]: pgmap v1498: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:41 vm08.local ceph-mon[56824]: pgmap v1499: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:41 vm00.local ceph-mon[47668]: pgmap v1499: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:42.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:42 vm00.local ceph-mon[47668]: pgmap v1500: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:42.682 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:42.683 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:42.709 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:42.709 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:42 vm08.local ceph-mon[56824]: pgmap v1500: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:45 vm08.local ceph-mon[56824]: pgmap v1501: 97 pgs: 97 active+clean; 453 KiB data, 68 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:45 vm00.local ceph-mon[47668]: pgmap v1501: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:47 vm08.local ceph-mon[56824]: pgmap v1502: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:47 vm00.local ceph-mon[47668]: pgmap v1502: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:47.711 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:47.711 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:47.742 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:47.742 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:49 vm00.local ceph-mon[47668]: pgmap v1503: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:49 vm08.local ceph-mon[56824]: pgmap v1503: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:51.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:51 vm00.local ceph-mon[47668]: pgmap v1504: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:51 vm08.local ceph-mon[56824]: pgmap v1504: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:52.743 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:52.744 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:52.967 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:52.968 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:52 vm00.local ceph-mon[47668]: pgmap v1505: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:52 vm08.local ceph-mon[56824]: pgmap v1505: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:55 vm08.local ceph-mon[56824]: pgmap v1506: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:55 vm00.local ceph-mon[47668]: pgmap v1506: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:57 vm08.local ceph-mon[56824]: pgmap v1507: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 
B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:57 vm00.local ceph-mon[47668]: pgmap v1507: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:53:57.970 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:53:57.971 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:53:57.997 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:53:57.997 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:53:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:53:59 vm08.local ceph-mon[56824]: pgmap v1508: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:53:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:53:59 vm00.local ceph-mon[47668]: pgmap v1508: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:01 vm08.local ceph-mon[56824]: pgmap v1509: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:01 vm00.local ceph-mon[47668]: pgmap v1509: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:02.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:02 vm00.local ceph-mon[47668]: pgmap v1510: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:02.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:02 vm08.local ceph-mon[56824]: pgmap v1510: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:02.998 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:02.999 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:03.024 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:03.025 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:05 vm08.local ceph-mon[56824]: pgmap v1511: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:05 vm00.local ceph-mon[47668]: pgmap v1511: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:54:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:54:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:06 vm00.local ceph-mon[47668]: 
from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:54:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:54:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:07 vm08.local ceph-mon[56824]: pgmap v1512: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:07 vm00.local ceph-mon[47668]: pgmap v1512: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:08.026 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:08.026 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:08.052 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:08.053 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:09.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:09 vm00.local ceph-mon[47668]: pgmap v1513: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:09 vm08.local ceph-mon[56824]: pgmap v1513: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:11 vm08.local ceph-mon[56824]: pgmap v1514: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:11.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:11 vm00.local ceph-mon[47668]: pgmap v1514: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:12.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:12 vm00.local ceph-mon[47668]: pgmap v1515: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:12.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:12 vm08.local ceph-mon[56824]: pgmap v1515: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:13.054 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:13.055 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:13.080 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:13.080 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:15 vm08.local ceph-mon[56824]: pgmap v1516: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:15 vm00.local ceph-mon[47668]: pgmap v1516: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:17.378 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:17 vm08.local ceph-mon[56824]: pgmap v1517: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:17 vm00.local ceph-mon[47668]: pgmap v1517: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:18.082 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:18.082 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:18.109 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:18.109 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:19 vm00.local ceph-mon[47668]: pgmap v1518: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:19 vm08.local ceph-mon[56824]: pgmap v1518: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:21.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:21 vm00.local ceph-mon[47668]: pgmap v1519: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:21 vm08.local ceph-mon[56824]: pgmap v1519: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:22.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:22 vm00.local ceph-mon[47668]: pgmap v1520: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:22 vm08.local ceph-mon[56824]: pgmap v1520: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:23.110 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:23.111 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:23.137 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:23.138 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:25 vm08.local ceph-mon[56824]: pgmap v1521: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:25 vm00.local ceph-mon[47668]: pgmap v1521: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:27 vm08.local ceph-mon[56824]: pgmap v1522: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:27 vm00.local ceph-mon[47668]: pgmap v1522: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:28.139 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:28.140 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:28.177 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:28.177 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:29 vm00.local ceph-mon[47668]: pgmap v1523: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:29 vm08.local ceph-mon[56824]: pgmap v1523: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:31 vm08.local ceph-mon[56824]: pgmap v1524: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:31.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:31 vm00.local ceph-mon[47668]: pgmap v1524: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:32 vm08.local ceph-mon[56824]: pgmap v1525: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:32 vm00.local ceph-mon[47668]: pgmap v1525: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:33.179 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:33.179 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:33.317 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:33.317 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:34.023 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:34.023 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:34.023 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:33 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:34.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:34.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:33 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-08T23:54:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:34 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:54:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:34 vm08.local ceph-mon[56824]: pgmap v1526: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:34 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:54:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:34 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:54:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:34 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:54:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:34 vm00.local ceph-mon[47668]: pgmap v1526: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:34 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:54:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:34 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:54:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:37 vm08.local ceph-mon[56824]: pgmap v1527: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:37 vm00.local ceph-mon[47668]: pgmap v1527: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:38.319 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:38.319 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:38.344 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:38.344 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:39 vm00.local ceph-mon[47668]: pgmap v1528: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:39 vm08.local ceph-mon[56824]: pgmap v1528: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:41 vm08.local ceph-mon[56824]: pgmap v1529: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:41 vm00.local ceph-mon[47668]: pgmap v1529: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:42.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:42 vm08.local ceph-mon[56824]: pgmap v1530: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:42.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:42 vm00.local ceph-mon[47668]: pgmap v1530: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:43.345 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:43.346 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:43.448 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:43.448 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:45 vm08.local ceph-mon[56824]: pgmap v1531: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:45 vm00.local ceph-mon[47668]: pgmap v1531: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:47 vm08.local ceph-mon[56824]: pgmap v1532: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:47 vm00.local ceph-mon[47668]: pgmap v1532: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:48.449 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:48.450 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:48.475 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:48.475 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:49.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:49 vm00.local ceph-mon[47668]: pgmap v1533: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:49 vm08.local ceph-mon[56824]: pgmap v1533: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:51.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:51 vm08.local ceph-mon[56824]: pgmap v1534: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:51 vm00.local ceph-mon[47668]: pgmap v1534: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:52.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:52 vm08.local ceph-mon[56824]: pgmap v1535: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:52.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:52 vm00.local ceph-mon[47668]: pgmap v1535: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:53.477 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:53.477 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:53.514 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-08T23:54:53.515 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:55 vm08.local ceph-mon[56824]: pgmap v1536: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:55 vm00.local ceph-mon[47668]: pgmap v1536: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:57.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:57 vm08.local ceph-mon[56824]: pgmap v1537: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:57.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:57 vm00.local ceph-mon[47668]: pgmap v1537: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:54:58.516 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:54:58.516 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:54:58.542 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:54:58.543 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:54:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:54:59 vm00.local ceph-mon[47668]: pgmap v1538: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:54:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:54:59 vm08.local ceph-mon[56824]: pgmap v1538: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:01 vm08.local ceph-mon[56824]: pgmap v1539: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:01.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:01 vm00.local ceph-mon[47668]: pgmap v1539: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:02.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:02 vm08.local ceph-mon[56824]: pgmap v1540: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:02 vm00.local ceph-mon[47668]: pgmap v1540: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:03.544 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:03.545 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:03.572 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:03.573 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:05 vm08.local ceph-mon[56824]: pgmap v1541: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:05.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:05 vm00.local ceph-mon[47668]: pgmap v1541: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:55:06.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:55:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:55:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:55:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:07 vm08.local ceph-mon[56824]: pgmap v1542: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:07.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:07 vm00.local ceph-mon[47668]: pgmap v1542: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:08.575 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:08.575 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:08.601 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:08.602 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:09.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:09 vm00.local ceph-mon[47668]: pgmap v1543: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:09 vm08.local ceph-mon[56824]: pgmap v1543: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:11 vm08.local ceph-mon[56824]: pgmap v1544: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:11 vm00.local ceph-mon[47668]: pgmap v1544: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:12.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:12 vm08.local ceph-mon[56824]: pgmap v1545: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:12.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:12 vm00.local ceph-mon[47668]: pgmap v1545: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:13.604 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:13.605 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:13.667 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:13.668 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:15 vm08.local ceph-mon[56824]: pgmap v1546: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:15 vm00.local ceph-mon[47668]: pgmap v1546: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:17.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:17 vm08.local ceph-mon[56824]: pgmap v1547: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:17.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:17 vm00.local ceph-mon[47668]: pgmap v1547: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:18.670 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:18.670 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:18.697 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:18.697 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:19 vm00.local ceph-mon[47668]: pgmap v1548: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:19.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:19 vm08.local ceph-mon[56824]: pgmap v1548: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:21 vm08.local ceph-mon[56824]: pgmap v1549: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:21 vm00.local ceph-mon[47668]: pgmap v1549: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:22.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:22 vm08.local ceph-mon[56824]: pgmap v1550: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:22.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:22 vm00.local ceph-mon[47668]: pgmap v1550: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:23.699 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:23.699 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:23.726 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:23.726 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:25.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:25 vm08.local ceph-mon[56824]: pgmap v1551: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-08T23:55:25.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:25 vm00.local ceph-mon[47668]: pgmap v1551: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:27 vm08.local ceph-mon[56824]: pgmap v1552: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:27 vm00.local ceph-mon[47668]: pgmap v1552: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:28.728 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:28.728 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:28.756 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:28.756 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:29 vm00.local ceph-mon[47668]: pgmap v1553: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:29 vm08.local ceph-mon[56824]: pgmap v1553: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:31.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:31 vm08.local ceph-mon[56824]: pgmap v1554: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:31 vm00.local ceph-mon[47668]: pgmap v1554: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:32.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:32 vm08.local ceph-mon[56824]: pgmap v1555: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:32 vm00.local ceph-mon[47668]: pgmap v1555: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:33.758 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:33.758 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:33.787 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:33.788 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:35 vm08.local ceph-mon[56824]: pgmap v1556: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:35.378 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:55:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:55:35.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:55:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:35 vm00.local ceph-mon[47668]: pgmap v1556: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:55:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:55:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:55:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:37 vm08.local ceph-mon[56824]: pgmap v1557: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:37 vm00.local ceph-mon[47668]: pgmap v1557: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:38.789 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:38.789 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:38.818 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:38.819 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:39.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:39 vm00.local ceph-mon[47668]: pgmap v1558: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:39 vm08.local ceph-mon[56824]: pgmap v1558: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-08T23:55:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:41 vm08.local ceph-mon[56824]: pgmap v1559: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:41.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:41 vm00.local ceph-mon[47668]: pgmap v1559: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:42.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:42 vm08.local ceph-mon[56824]: pgmap v1560: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:42.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:42 vm00.local ceph-mon[47668]: pgmap v1560: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:43.820 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:43.821 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:43.848 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:43.849 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:45.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:45 vm08.local ceph-mon[56824]: pgmap v1561: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:45.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:45 vm00.local ceph-mon[47668]: pgmap v1561: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:47 vm08.local ceph-mon[56824]: pgmap v1562: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:47.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:47 vm00.local ceph-mon[47668]: pgmap v1562: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:48.850 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:48.850 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:48.876 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:48.876 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:49 vm08.local ceph-mon[56824]: pgmap v1563: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:49 vm00.local ceph-mon[47668]: pgmap v1563: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:51 vm08.local ceph-mon[56824]: pgmap v1564: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:51.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:51 vm00.local ceph-mon[47668]: pgmap v1564: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:53.087 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:52 vm00.local ceph-mon[47668]: pgmap v1565: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:52 vm08.local ceph-mon[56824]: pgmap v1565: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:53.878 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:53.878 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:53.938 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:53.939 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:55 vm08.local ceph-mon[56824]: pgmap v1566: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:55 vm00.local ceph-mon[47668]: pgmap v1566: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:57.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:57 vm08.local ceph-mon[56824]: pgmap v1567: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:57 vm00.local ceph-mon[47668]: pgmap v1567: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:55:58.940 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:55:58.941 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:55:58.967 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:55:58.967 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:55:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:55:59 vm08.local ceph-mon[56824]: pgmap v1568: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:55:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:55:59 vm00.local ceph-mon[47668]: pgmap v1568: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:01 vm08.local ceph-mon[56824]: pgmap v1569: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:01 vm00.local ceph-mon[47668]: pgmap v1569: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:02.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:02 vm08.local ceph-mon[56824]: pgmap v1570: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:02.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:02 vm00.local ceph-mon[47668]: pgmap v1570: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:03.969 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:03.969 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:03.994 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:03.995 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:05 vm08.local ceph-mon[56824]: pgmap v1571: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:05 vm00.local ceph-mon[47668]: pgmap v1571: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:56:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:56:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:56:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:56:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:07 vm08.local ceph-mon[56824]: pgmap v1572: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:07.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:07 vm00.local ceph-mon[47668]: pgmap v1572: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:08.996 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:08.997 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:09.023 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:09.024 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:09.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:09 vm00.local ceph-mon[47668]: pgmap v1573: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:09 vm08.local ceph-mon[56824]: pgmap v1573: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:11 vm00.local ceph-mon[47668]: pgmap v1574: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:11.628 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:11 vm08.local ceph-mon[56824]: pgmap v1574: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:12.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:12 vm08.local ceph-mon[56824]: pgmap v1575: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:12.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:12 vm00.local ceph-mon[47668]: pgmap v1575: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:14.026 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:14.026 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:14.082 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:14.082 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:15 vm00.local ceph-mon[47668]: pgmap v1576: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:15 vm08.local ceph-mon[56824]: pgmap v1576: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:17.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:17 vm00.local ceph-mon[47668]: pgmap v1577: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:17.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:17 vm08.local ceph-mon[56824]: pgmap v1577: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:19.084 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:19.085 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:19.115 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:19.116 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:19 vm00.local ceph-mon[47668]: pgmap v1578: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:19.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:19 vm08.local ceph-mon[56824]: pgmap v1578: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:21 vm00.local ceph-mon[47668]: pgmap v1579: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:21 vm08.local ceph-mon[56824]: pgmap v1579: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:22.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:22 vm08.local ceph-mon[56824]: pgmap v1580: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:22.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:22 vm00.local ceph-mon[47668]: pgmap v1580: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:24.118 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:24.118 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:24.148 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:24.149 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:25.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:25 vm08.local ceph-mon[56824]: pgmap v1581: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:25 vm00.local ceph-mon[47668]: pgmap v1581: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:27.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:27 vm08.local ceph-mon[56824]: pgmap v1582: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:27.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:27 vm00.local ceph-mon[47668]: pgmap v1582: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:29.150 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:29.151 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:29.177 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:29.178 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:29 vm00.local ceph-mon[47668]: pgmap v1583: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:29 vm08.local ceph-mon[56824]: pgmap v1583: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:31 vm00.local ceph-mon[47668]: pgmap v1584: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:31 vm08.local ceph-mon[56824]: pgmap v1584: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:32.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:32 vm08.local ceph-mon[56824]: pgmap v1585: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:32 vm00.local ceph-mon[47668]: pgmap v1585: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:34.179 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:34.180 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:34.211 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-08T23:56:34.212 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:35.105 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:35 vm08.local ceph-mon[56824]: pgmap v1586: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:35.105 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:35.105 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:35.105 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:35.105 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:35.105 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:56:35.105 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:56:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:35 vm00.local ceph-mon[47668]: pgmap v1586: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:56:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:56:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:37 vm08.local ceph-mon[56824]: pgmap v1587: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:37.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 
08 23:56:37 vm00.local ceph-mon[47668]: pgmap v1587: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:39.213 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:39.214 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:39.299 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:39.300 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:39.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:39 vm08.local ceph-mon[56824]: pgmap v1588: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:39 vm00.local ceph-mon[47668]: pgmap v1588: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:41 vm08.local ceph-mon[56824]: pgmap v1589: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:41 vm00.local ceph-mon[47668]: pgmap v1589: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:42 vm08.local ceph-mon[56824]: pgmap v1590: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:42.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:42 vm00.local ceph-mon[47668]: pgmap v1590: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:44.301 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:44.301 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:44.493 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:44.494 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:45 vm08.local ceph-mon[56824]: pgmap v1591: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:45.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:45 vm00.local ceph-mon[47668]: pgmap v1591: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:47 vm00.local ceph-mon[47668]: pgmap v1592: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:47 vm08.local ceph-mon[56824]: pgmap v1592: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:49.495 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:49.496 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:49.521 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:49.522 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:49 vm08.local ceph-mon[56824]: pgmap v1593: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:49 vm00.local ceph-mon[47668]: pgmap v1593: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:51 vm08.local ceph-mon[56824]: pgmap v1594: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:51 vm00.local ceph-mon[47668]: pgmap v1594: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:53.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:52 vm00.local ceph-mon[47668]: pgmap v1595: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:52 vm08.local ceph-mon[56824]: pgmap v1595: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:54.523 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:54.524 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:54.552 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:54.552 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:55.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:55 vm00.local ceph-mon[47668]: pgmap v1596: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:55 vm08.local ceph-mon[56824]: pgmap v1596: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:57 vm00.local ceph-mon[47668]: pgmap v1597: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:57.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:57 vm08.local ceph-mon[56824]: pgmap v1597: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:56:59.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:56:59 vm00.local ceph-mon[47668]: pgmap v1598: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:56:59.554 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:56:59.555 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:56:59.581 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:56:59.582 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:56:59.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:56:59 vm08.local ceph-mon[56824]: pgmap v1598: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s 
wr, 0 op/s 2026-03-08T23:57:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:01 vm00.local ceph-mon[47668]: pgmap v1599: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:01 vm08.local ceph-mon[56824]: pgmap v1599: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:02.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:02 vm08.local ceph-mon[56824]: pgmap v1600: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:02 vm00.local ceph-mon[47668]: pgmap v1600: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:04.583 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:04.584 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:04.612 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:04.612 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:05.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:05 vm08.local ceph-mon[56824]: pgmap v1601: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:05 vm00.local ceph-mon[47668]: pgmap v1601: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:57:06.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:57:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:57:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:57:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:07 vm08.local ceph-mon[56824]: pgmap v1602: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:07 vm00.local ceph-mon[47668]: pgmap v1602: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:09 vm00.local ceph-mon[47668]: pgmap v1603: 97 pgs: 97 active+clean; 453 KiB 
data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:09 vm08.local ceph-mon[56824]: pgmap v1603: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:09.613 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:09.614 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:09.684 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:09.684 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:11 vm00.local ceph-mon[47668]: pgmap v1604: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:11 vm08.local ceph-mon[56824]: pgmap v1604: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:12.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:12 vm00.local ceph-mon[47668]: pgmap v1605: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:12 vm08.local ceph-mon[56824]: pgmap v1605: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:14.686 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:14.686 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:14.713 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:14.713 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:15 vm08.local ceph-mon[56824]: pgmap v1606: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:15 vm00.local ceph-mon[47668]: pgmap v1606: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:17 vm08.local ceph-mon[56824]: pgmap v1607: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:17 vm00.local ceph-mon[47668]: pgmap v1607: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:19.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:19 vm08.local ceph-mon[56824]: pgmap v1608: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:19.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:19 vm00.local ceph-mon[47668]: pgmap v1608: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:19.715 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:19.715 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-08T23:57:19.810 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:19.811 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:21 vm00.local ceph-mon[47668]: pgmap v1609: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:21.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:21 vm08.local ceph-mon[56824]: pgmap v1609: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:23 vm08.local ceph-mon[56824]: pgmap v1610: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:23 vm00.local ceph-mon[47668]: pgmap v1610: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:24.812 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:24.813 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:24.841 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:24.841 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:24.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:24 vm08.local ceph-mon[56824]: pgmap v1611: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:24.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:24 vm00.local ceph-mon[47668]: pgmap v1611: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:27.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:27 vm08.local ceph-mon[56824]: pgmap v1612: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:27 vm00.local ceph-mon[47668]: pgmap v1612: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:29 vm00.local ceph-mon[47668]: pgmap v1613: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:29 vm08.local ceph-mon[56824]: pgmap v1613: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:29.842 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:29.843 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:29.870 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:29.871 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:31 vm00.local ceph-mon[47668]: pgmap v1614: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:31 vm08.local 
ceph-mon[56824]: pgmap v1614: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:32.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:32 vm08.local ceph-mon[56824]: pgmap v1615: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:32 vm00.local ceph-mon[47668]: pgmap v1615: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:34.872 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:34.873 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:34.900 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:34.901 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:35.353 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:35 vm08.local ceph-mon[56824]: pgmap v1616: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:35.353 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:57:35.353 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:57:35.353 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:35 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:57:35.396 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:35 vm00.local ceph-mon[47668]: pgmap v1616: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:35.396 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:57:35.396 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:57:35.396 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:35 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:57:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:57:37.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:37 vm00.local ceph-mon[47668]: pgmap v1617: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:37.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": 
"osd_memory_target"}]: dispatch 2026-03-08T23:57:37.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:57:37.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:57:37.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:57:37.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:57:37.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:37 vm08.local ceph-mon[56824]: pgmap v1617: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:37.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:57:37.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:57:37.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:57:37.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:57:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:39 vm00.local ceph-mon[47668]: pgmap v1618: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:39 vm08.local ceph-mon[56824]: pgmap v1618: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:39.903 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:39.903 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:39.929 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:39.930 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:41.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:41 vm08.local ceph-mon[56824]: pgmap v1619: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:41 vm00.local ceph-mon[47668]: pgmap v1619: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:42.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:42 vm08.local ceph-mon[56824]: pgmap v1620: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:42.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:42 vm00.local ceph-mon[47668]: pgmap v1620: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-08T23:57:44.931 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:44.932 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:44.967 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:44.968 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:45 vm08.local ceph-mon[56824]: pgmap v1621: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:45 vm00.local ceph-mon[47668]: pgmap v1621: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:47.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:47 vm08.local ceph-mon[56824]: pgmap v1622: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:47 vm00.local ceph-mon[47668]: pgmap v1622: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:49 vm00.local ceph-mon[47668]: pgmap v1623: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:49 vm08.local ceph-mon[56824]: pgmap v1623: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:49.969 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:49.969 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:49.996 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:49.996 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:51.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:51 vm00.local ceph-mon[47668]: pgmap v1624: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:51 vm08.local ceph-mon[56824]: pgmap v1624: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:52.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:52 vm08.local ceph-mon[56824]: pgmap v1625: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:52.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:52 vm00.local ceph-mon[47668]: pgmap v1625: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:54.998 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:57:54.998 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:57:55.024 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:57:55.025 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:57:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:55 vm08.local ceph-mon[56824]: pgmap v1626: 97 pgs: 
97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:55.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:55 vm00.local ceph-mon[47668]: pgmap v1626: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:57 vm08.local ceph-mon[56824]: pgmap v1627: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:57 vm00.local ceph-mon[47668]: pgmap v1627: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:57:59.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:57:59 vm08.local ceph-mon[56824]: pgmap v1628: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:57:59.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:57:59 vm00.local ceph-mon[47668]: pgmap v1628: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:00.026 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:00.027 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:00.053 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:00.054 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:01 vm08.local ceph-mon[56824]: pgmap v1629: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:01 vm00.local ceph-mon[47668]: pgmap v1629: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:02.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:02 vm00.local ceph-mon[47668]: pgmap v1630: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:02 vm08.local ceph-mon[56824]: pgmap v1630: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:05.055 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:05.056 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:05.082 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:05.082 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:05.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:05 vm08.local ceph-mon[56824]: pgmap v1631: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:05.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:05 vm00.local ceph-mon[47668]: pgmap v1631: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:58:06.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:58:06.477 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:58:06.477 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:58:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:07 vm08.local ceph-mon[56824]: pgmap v1632: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:07 vm00.local ceph-mon[47668]: pgmap v1632: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:09 vm08.local ceph-mon[56824]: pgmap v1633: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:09 vm00.local ceph-mon[47668]: pgmap v1633: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:10.083 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:10.084 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:10.328 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:10.328 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:11 vm00.local ceph-mon[47668]: pgmap v1634: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:11 vm08.local ceph-mon[56824]: pgmap v1634: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:12.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:12 vm00.local ceph-mon[47668]: pgmap v1635: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:12 vm08.local ceph-mon[56824]: pgmap v1635: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:15.329 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:15.330 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:15.356 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:15.356 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:15.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:15 vm08.local ceph-mon[56824]: pgmap v1636: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:15 vm00.local ceph-mon[47668]: pgmap v1636: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:17.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:17 vm08.local ceph-mon[56824]: pgmap v1637: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:17 vm00.local ceph-mon[47668]: pgmap v1637: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:19 vm08.local ceph-mon[56824]: pgmap v1638: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:58:19.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:19 vm00.local ceph-mon[47668]: pgmap v1638: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:58:20.358 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:20.358 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:20.387 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:20.387 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:21.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:21 vm08.local ceph-mon[56824]: pgmap v1639: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:58:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:21 vm00.local ceph-mon[47668]: pgmap v1639: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:58:22.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:22 vm00.local ceph-mon[47668]: pgmap v1640: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:23.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:22 vm08.local ceph-mon[56824]: pgmap v1640: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:25.389 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:25.390 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:25.417 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:25.417 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:25 vm00.local ceph-mon[47668]: pgmap v1641: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-08T23:58:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:25 vm08.local ceph-mon[56824]: pgmap v1641: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 
op/s 2026-03-08T23:58:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:27 vm00.local ceph-mon[47668]: pgmap v1642: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:27 vm08.local ceph-mon[56824]: pgmap v1642: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:29 vm00.local ceph-mon[47668]: pgmap v1643: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:29 vm08.local ceph-mon[56824]: pgmap v1643: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:30.418 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:30.419 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:30.444 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:30.445 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:31 vm00.local ceph-mon[47668]: pgmap v1644: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:31 vm08.local ceph-mon[56824]: pgmap v1644: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:32.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:32 vm00.local ceph-mon[47668]: pgmap v1645: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:32 vm08.local ceph-mon[56824]: pgmap v1645: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:35 vm00.local ceph-mon[47668]: pgmap v1646: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:35.446 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:35.446 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:35.471 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:35.472 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:35 vm08.local ceph-mon[56824]: pgmap v1646: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:37 vm00.local ceph-mon[47668]: pgmap v1647: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:58:37.429 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:58:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:58:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:58:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:58:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:37 vm08.local ceph-mon[56824]: pgmap v1647: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:37.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:58:37.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:58:37.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:58:37.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:58:37.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:58:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:39 vm00.local ceph-mon[47668]: pgmap v1648: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:39 vm08.local ceph-mon[56824]: pgmap v1648: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:40.473 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:40.473 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:40.501 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:40.502 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:41 vm00.local ceph-mon[47668]: pgmap v1649: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:41 vm08.local ceph-mon[56824]: pgmap v1649: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:42.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:42 vm00.local ceph-mon[47668]: pgmap v1650: 
97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:42 vm08.local ceph-mon[56824]: pgmap v1650: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:45 vm00.local ceph-mon[47668]: pgmap v1651: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:45.503 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:45.504 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:45.531 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:45.532 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:45 vm08.local ceph-mon[56824]: pgmap v1651: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:47 vm00.local ceph-mon[47668]: pgmap v1652: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:47 vm08.local ceph-mon[56824]: pgmap v1652: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:49.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:49 vm08.local ceph-mon[56824]: pgmap v1653: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:49 vm00.local ceph-mon[47668]: pgmap v1653: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:50.533 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:50.534 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:50.563 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:50.563 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:51.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:51 vm08.local ceph-mon[56824]: pgmap v1654: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:51 vm00.local ceph-mon[47668]: pgmap v1654: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:53 vm00.local ceph-mon[47668]: pgmap v1655: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:53 vm08.local ceph-mon[56824]: pgmap v1655: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:55.565 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:58:55.565 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs 
vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:58:55.599 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:58:55.599 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:58:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:55 vm00.local ceph-mon[47668]: pgmap v1656: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:55 vm08.local ceph-mon[56824]: pgmap v1656: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:57 vm08.local ceph-mon[56824]: pgmap v1657: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:57 vm00.local ceph-mon[47668]: pgmap v1657: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:58:58.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:58:58 vm08.local ceph-mon[56824]: pgmap v1658: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:58:58.901 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:58:58 vm00.local ceph-mon[47668]: pgmap v1658: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:00.601 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:00.602 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:00.629 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:00.630 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:01.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:01 vm00.local ceph-mon[47668]: pgmap v1659: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:01 vm08.local ceph-mon[56824]: pgmap v1659: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:03.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:02 vm00.local ceph-mon[47668]: pgmap v1660: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:02 vm08.local ceph-mon[56824]: pgmap v1660: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:05.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:05 vm00.local ceph-mon[47668]: pgmap v1661: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:05 vm08.local ceph-mon[56824]: pgmap v1661: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:05.631 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:05.632 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-08T23:59:05.659 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:05.660 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:59:06.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:59:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:59:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-08T23:59:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:07 vm00.local ceph-mon[47668]: pgmap v1662: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:07 vm08.local ceph-mon[56824]: pgmap v1662: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:09 vm00.local ceph-mon[47668]: pgmap v1663: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:09 vm08.local ceph-mon[56824]: pgmap v1663: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:10.661 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:10.662 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:10.689 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:10.689 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:11.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:11 vm00.local ceph-mon[47668]: pgmap v1664: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:11 vm08.local ceph-mon[56824]: pgmap v1664: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:12 vm00.local ceph-mon[47668]: pgmap v1665: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:12 vm08.local ceph-mon[56824]: pgmap v1665: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 
0 op/s 2026-03-08T23:59:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:15 vm00.local ceph-mon[47668]: pgmap v1666: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:15 vm08.local ceph-mon[56824]: pgmap v1666: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:15.691 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:15.691 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:15.719 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:15.720 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:17 vm00.local ceph-mon[47668]: pgmap v1667: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:17 vm08.local ceph-mon[56824]: pgmap v1667: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:19 vm00.local ceph-mon[47668]: pgmap v1668: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:19 vm08.local ceph-mon[56824]: pgmap v1668: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:20.721 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:20.722 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:20.748 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:20.749 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:21 vm00.local ceph-mon[47668]: pgmap v1669: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:21 vm08.local ceph-mon[56824]: pgmap v1669: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:23.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:22 vm00.local ceph-mon[47668]: pgmap v1670: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:22 vm08.local ceph-mon[56824]: pgmap v1670: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:25 vm00.local ceph-mon[47668]: pgmap v1671: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:25 vm08.local ceph-mon[56824]: pgmap v1671: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:25.750 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:25.751 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:25.776 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:25.777 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:27.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:27 vm00.local ceph-mon[47668]: pgmap v1672: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:27 vm08.local ceph-mon[56824]: pgmap v1672: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:29 vm00.local ceph-mon[47668]: pgmap v1673: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:29.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:29 vm08.local ceph-mon[56824]: pgmap v1673: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:30.779 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:30.779 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:30.806 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:30.806 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:31.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:31 vm00.local ceph-mon[47668]: pgmap v1674: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:31 vm08.local ceph-mon[56824]: pgmap v1674: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:33.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:32 vm00.local ceph-mon[47668]: pgmap v1675: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:32 vm08.local ceph-mon[56824]: pgmap v1675: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:35 vm00.local ceph-mon[47668]: pgmap v1676: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:35 vm08.local ceph-mon[56824]: pgmap v1676: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:35.808 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:35.808 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:35.834 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:35.835 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:37 vm08.local ceph-mon[56824]: pgmap v1677: 97 pgs: 97 active+clean; 453 KiB 
data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:37.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:59:37.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:59:37.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:37 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:59:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:37 vm00.local ceph-mon[47668]: pgmap v1677: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:59:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:59:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:37 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:59:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:59:38.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:59:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:59:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-08T23:59:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:39 vm08.local ceph-mon[56824]: pgmap v1678: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:39 vm00.local ceph-mon[47668]: pgmap v1678: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:40.836 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:40.837 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:40.862 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:40.862 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:41 vm08.local ceph-mon[56824]: pgmap v1679: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-08T23:59:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:41 vm00.local ceph-mon[47668]: pgmap v1679: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:43.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:42 vm00.local ceph-mon[47668]: pgmap v1680: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:42 vm08.local ceph-mon[56824]: pgmap v1680: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:45 vm00.local ceph-mon[47668]: pgmap v1681: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:45 vm08.local ceph-mon[56824]: pgmap v1681: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:45.864 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:45.864 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:45.891 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:45.892 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:47 vm08.local ceph-mon[56824]: pgmap v1682: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:47 vm00.local ceph-mon[47668]: pgmap v1682: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:49 vm08.local ceph-mon[56824]: pgmap v1683: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:49 vm00.local ceph-mon[47668]: pgmap v1683: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:50.893 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:50.894 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:50.920 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:50.920 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:51 vm08.local ceph-mon[56824]: pgmap v1684: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:51 vm00.local ceph-mon[47668]: pgmap v1684: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:52 vm08.local ceph-mon[56824]: pgmap v1685: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:53.179 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:52 vm00.local ceph-mon[47668]: pgmap v1685: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:55 vm00.local ceph-mon[47668]: pgmap v1686: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:55 vm08.local ceph-mon[56824]: pgmap v1686: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:55.921 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-08T23:59:55.922 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-08T23:59:55.948 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-08T23:59:55.948 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-08T23:59:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:57 vm00.local ceph-mon[47668]: pgmap v1687: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:57 vm08.local ceph-mon[56824]: pgmap v1687: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-08T23:59:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 08 23:59:59 vm00.local ceph-mon[47668]: pgmap v1688: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-08T23:59:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 08 23:59:59 vm08.local ceph-mon[56824]: pgmap v1688: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:00.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:00 vm00.local ceph-mon[47668]: overall HEALTH_OK 2026-03-09T00:00:00.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:00 vm08.local ceph-mon[56824]: overall HEALTH_OK 2026-03-09T00:00:00.950 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:00.951 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:00.987 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:00.988 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:01.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:01 vm00.local ceph-mon[47668]: pgmap v1689: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:01 vm08.local ceph-mon[56824]: pgmap v1689: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:02 vm08.local ceph-mon[56824]: pgmap v1690: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:03.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:02 vm00.local ceph-mon[47668]: pgmap v1690: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:05.429 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:05 vm00.local ceph-mon[47668]: pgmap v1691: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:05 vm08.local ceph-mon[56824]: pgmap v1691: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:05.990 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:05.991 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:06.207 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:06.208 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:00:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:00:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:00:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:00:07.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:07 vm08.local ceph-mon[56824]: pgmap v1692: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:07 vm00.local ceph-mon[47668]: pgmap v1692: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:09 vm08.local ceph-mon[56824]: pgmap v1693: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:09 vm00.local ceph-mon[47668]: pgmap v1693: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:11.209 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:11.210 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:11.237 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:11.237 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:11 vm08.local ceph-mon[56824]: pgmap v1694: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:11.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:11 vm00.local ceph-mon[47668]: pgmap v1694: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:12 vm08.local ceph-mon[56824]: pgmap v1695: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:13.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:12 vm00.local ceph-mon[47668]: pgmap v1695: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:15 vm00.local ceph-mon[47668]: pgmap v1696: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:15 vm08.local ceph-mon[56824]: pgmap v1696: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:16.239 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:16.239 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:16.265 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:16.266 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:17.249 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:17 vm08.local ceph-mon[56824]: pgmap v1697: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:17 vm00.local ceph-mon[47668]: pgmap v1697: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:19.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:19 vm00.local ceph-mon[47668]: pgmap v1698: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:19 vm08.local ceph-mon[56824]: pgmap v1698: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:21.268 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:21.268 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:21.357 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:21.358 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:21 vm08.local ceph-mon[56824]: pgmap v1699: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:21 vm00.local ceph-mon[47668]: pgmap v1699: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:23.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:22 vm08.local ceph-mon[56824]: pgmap v1700: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:23.180 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:22 vm00.local ceph-mon[47668]: pgmap v1700: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:25 vm00.local ceph-mon[47668]: pgmap v1701: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:25 vm08.local ceph-mon[56824]: pgmap v1701: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:26.359 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:26.360 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:26.535 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:26.536 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:27 vm00.local ceph-mon[47668]: pgmap v1702: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:27 vm08.local ceph-mon[56824]: pgmap v1702: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:29.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:29 vm08.local ceph-mon[56824]: pgmap v1703: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:29 vm00.local ceph-mon[47668]: pgmap v1703: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:31.537 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:31.538 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:31.591 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:31.592 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:31 vm08.local ceph-mon[56824]: pgmap v1704: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:31 vm00.local ceph-mon[47668]: pgmap v1704: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:33.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:32 vm08.local ceph-mon[56824]: pgmap v1705: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:33.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:32 vm00.local ceph-mon[47668]: pgmap v1705: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:35.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:35 vm00.local ceph-mon[47668]: pgmap v1706: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:35.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:35 vm08.local ceph-mon[56824]: pgmap v1706: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:36.593 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:36.594 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:36.621 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:36.621 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:37.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:37 vm00.local ceph-mon[47668]: pgmap v1707: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:37.516 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:37 vm08.local ceph-mon[56824]: pgmap v1707: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:38.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:00:38.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:00:38.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:00:38.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:00:38.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:00:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:00:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:00:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:00:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:00:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:00:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:39 vm00.local ceph-mon[47668]: pgmap v1708: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:39 vm08.local ceph-mon[56824]: pgmap 
v1708: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:41.623 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:41.624 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:41 vm08.local ceph-mon[56824]: pgmap v1709: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:41.650 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:41.651 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:41 vm00.local ceph-mon[47668]: pgmap v1709: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:42 vm08.local ceph-mon[56824]: pgmap v1710: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:43.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:42 vm00.local ceph-mon[47668]: pgmap v1710: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:45.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:45 vm00.local ceph-mon[47668]: pgmap v1711: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:45 vm08.local ceph-mon[56824]: pgmap v1711: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:46.652 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:46.653 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:46.682 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:46.682 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:47 vm08.local ceph-mon[56824]: pgmap v1712: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:47 vm00.local ceph-mon[47668]: pgmap v1712: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:49 vm08.local ceph-mon[56824]: pgmap v1713: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:49 vm00.local ceph-mon[47668]: pgmap v1713: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:51 vm08.local ceph-mon[56824]: pgmap v1714: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:51 vm00.local ceph-mon[47668]: pgmap v1714: 97 pgs: 97 active+clean; 453 KiB 
data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:51.683 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:51.684 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:51.711 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:51.712 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:53 vm08.local ceph-mon[56824]: pgmap v1715: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:53 vm00.local ceph-mon[47668]: pgmap v1715: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:55.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:55 vm00.local ceph-mon[47668]: pgmap v1716: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:55.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:55 vm08.local ceph-mon[56824]: pgmap v1716: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:56.713 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:00:56.714 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:00:56.739 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:00:56.739 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:00:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:57 vm00.local ceph-mon[47668]: pgmap v1717: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:57 vm08.local ceph-mon[56824]: pgmap v1717: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:00:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:00:59 vm00.local ceph-mon[47668]: pgmap v1718: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:00:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:00:59 vm08.local ceph-mon[56824]: pgmap v1718: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:01 vm00.local ceph-mon[47668]: pgmap v1719: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:01 vm08.local ceph-mon[56824]: pgmap v1719: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:01.741 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:01.741 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:01.766 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:01.767 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:03.377 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:03 vm08.local ceph-mon[56824]: pgmap v1720: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:03 vm00.local ceph-mon[47668]: pgmap v1720: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:05.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:05 vm00.local ceph-mon[47668]: pgmap v1721: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:05 vm08.local ceph-mon[56824]: pgmap v1721: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:01:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:01:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:01:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:01:06.768 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:06.769 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:06.864 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:06.865 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:07.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:07 vm08.local ceph-mon[56824]: pgmap v1722: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:07 vm00.local ceph-mon[47668]: pgmap v1722: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:09.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:09 vm08.local ceph-mon[56824]: pgmap v1723: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:09 vm00.local ceph-mon[47668]: pgmap v1723: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:10.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:10 vm08.local ceph-mon[56824]: pgmap v1724: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:10.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:10 vm00.local ceph-mon[47668]: pgmap v1724: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:11.866 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:11.867 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:11.893 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:11.894 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:13.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:13 vm08.local ceph-mon[56824]: pgmap v1725: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:13 vm00.local ceph-mon[47668]: pgmap v1725: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:15 vm00.local ceph-mon[47668]: pgmap v1726: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:15 vm08.local ceph-mon[56824]: pgmap v1726: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:16.895 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:16.896 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:16.922 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:16.922 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:17 vm00.local ceph-mon[47668]: pgmap v1727: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:17 vm08.local ceph-mon[56824]: pgmap v1727: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:19 vm00.local ceph-mon[47668]: pgmap v1728: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:19 vm08.local ceph-mon[56824]: pgmap v1728: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:21.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:21 vm00.local ceph-mon[47668]: pgmap v1729: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:21 vm08.local ceph-mon[56824]: pgmap v1729: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:21.924 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:21.924 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:21.950 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:21.951 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:23 vm08.local ceph-mon[56824]: pgmap v1730: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:23 vm00.local ceph-mon[47668]: pgmap v1730: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:25.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:25 vm00.local ceph-mon[47668]: pgmap v1731: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:25 vm08.local ceph-mon[56824]: pgmap v1731: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:26.952 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:26.953 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:26.979 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:26.979 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:27.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:27 vm00.local ceph-mon[47668]: pgmap v1732: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:27 vm08.local ceph-mon[56824]: pgmap v1732: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:29 vm00.local ceph-mon[47668]: pgmap v1733: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:29 vm08.local ceph-mon[56824]: pgmap v1733: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:31 vm08.local ceph-mon[56824]: pgmap v1734: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:31 vm00.local ceph-mon[47668]: pgmap v1734: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:31.981 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:31.981 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:32.008 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:32.008 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:33 vm08.local ceph-mon[56824]: pgmap v1735: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:33 vm00.local ceph-mon[47668]: pgmap 
v1735: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:35.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:35 vm00.local ceph-mon[47668]: pgmap v1736: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:35 vm08.local ceph-mon[56824]: pgmap v1736: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:37.010 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:37.011 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:37.038 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:37.039 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:37.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:37 vm00.local ceph-mon[47668]: pgmap v1737: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:37.560 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:37 vm08.local ceph-mon[56824]: pgmap v1737: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:38.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:01:38.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:01:38.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:01:38.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:01:38.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:01:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:01:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:01:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:01:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:01:38.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' 2026-03-09T00:01:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:39 vm00.local ceph-mon[47668]: pgmap v1738: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:39.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:39 vm08.local ceph-mon[56824]: pgmap v1738: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:41 vm08.local ceph-mon[56824]: pgmap v1739: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:41 vm00.local ceph-mon[47668]: pgmap v1739: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:42.040 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:42.041 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:42.067 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:42.067 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:43.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:43 vm08.local ceph-mon[56824]: pgmap v1740: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:43 vm00.local ceph-mon[47668]: pgmap v1740: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:45 vm00.local ceph-mon[47668]: pgmap v1741: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:45 vm08.local ceph-mon[56824]: pgmap v1741: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:47.069 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:47.069 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:47.095 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:47.096 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:47.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:47 vm00.local ceph-mon[47668]: pgmap v1742: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:47 vm08.local ceph-mon[56824]: pgmap v1742: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:49 vm00.local ceph-mon[47668]: pgmap v1743: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:49.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:49 vm08.local ceph-mon[56824]: pgmap v1743: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-09T00:01:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:51 vm08.local ceph-mon[56824]: pgmap v1744: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:51 vm00.local ceph-mon[47668]: pgmap v1744: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:52.097 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:52.098 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:52.124 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:52.124 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:53.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:53 vm08.local ceph-mon[56824]: pgmap v1745: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:53 vm00.local ceph-mon[47668]: pgmap v1745: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:55 vm00.local ceph-mon[47668]: pgmap v1746: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:55 vm08.local ceph-mon[56824]: pgmap v1746: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:57.126 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:01:57.126 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:01:57.154 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:01:57.155 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:01:57.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:57 vm00.local ceph-mon[47668]: pgmap v1747: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:57 vm08.local ceph-mon[56824]: pgmap v1747: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:01:59.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:01:59 vm08.local ceph-mon[56824]: pgmap v1748: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:01:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:01:59 vm00.local ceph-mon[47668]: pgmap v1748: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:01 vm08.local ceph-mon[56824]: pgmap v1749: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:01 vm00.local ceph-mon[47668]: pgmap v1749: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:02.156 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:02.157 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:02.183 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:02.183 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:03 vm00.local ceph-mon[47668]: pgmap v1750: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:03 vm08.local ceph-mon[56824]: pgmap v1750: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:05 vm08.local ceph-mon[56824]: pgmap v1751: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:05 vm00.local ceph-mon[47668]: pgmap v1751: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:06.529 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:02:06.529 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:02:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:02:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:02:07.184 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:07.185 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:07.211 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:07.211 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:07 vm08.local ceph-mon[56824]: pgmap v1752: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:07 vm00.local ceph-mon[47668]: pgmap v1752: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:09 vm08.local ceph-mon[56824]: pgmap v1753: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:09.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:09 vm00.local ceph-mon[47668]: pgmap v1753: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:11 vm08.local ceph-mon[56824]: pgmap v1754: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:11 vm00.local ceph-mon[47668]: pgmap v1754: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:12.212 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:12.213 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:12.238 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:12.239 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:13 vm08.local ceph-mon[56824]: pgmap v1755: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:13.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:13 vm00.local ceph-mon[47668]: pgmap v1755: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:15 vm00.local ceph-mon[47668]: pgmap v1756: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:15 vm08.local ceph-mon[56824]: pgmap v1756: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:17.240 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:17.241 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:17.266 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:17.267 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:17.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:17 vm00.local ceph-mon[47668]: pgmap v1757: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:17 vm08.local ceph-mon[56824]: pgmap v1757: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:19 vm08.local ceph-mon[56824]: pgmap v1758: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:19.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:19 vm00.local ceph-mon[47668]: pgmap v1758: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:21 vm08.local ceph-mon[56824]: pgmap v1759: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:21.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:21 vm00.local ceph-mon[47668]: pgmap v1759: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:22.268 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:22.269 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:22.296 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:22.296 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:23.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:23 vm08.local ceph-mon[56824]: pgmap v1760: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:23 vm00.local ceph-mon[47668]: pgmap v1760: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:25 vm00.local ceph-mon[47668]: pgmap v1761: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:25 vm08.local ceph-mon[56824]: pgmap v1761: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:27.298 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:27.298 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:27.323 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:27.324 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:27 vm08.local ceph-mon[56824]: pgmap v1762: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:27 vm00.local ceph-mon[47668]: pgmap v1762: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:29 vm00.local ceph-mon[47668]: pgmap v1763: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:29 vm08.local ceph-mon[56824]: pgmap v1763: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:31 vm00.local ceph-mon[47668]: pgmap v1764: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:31 vm08.local ceph-mon[56824]: pgmap v1764: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:32.325 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:32.326 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:32.351 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-09T00:02:32.352 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:33.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:33 vm08.local ceph-mon[56824]: pgmap v1765: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:33 vm00.local ceph-mon[47668]: pgmap v1765: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:35 vm08.local ceph-mon[56824]: pgmap v1766: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:35 vm00.local ceph-mon[47668]: pgmap v1766: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:37.353 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:37.354 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:37.442 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:37.443 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:37 vm08.local ceph-mon[56824]: pgmap v1767: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:37 vm00.local ceph-mon[47668]: pgmap v1767: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:02:38.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:02:38.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:02:38.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:02:38.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:02:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:02:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:02:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:02:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:02:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:02:39.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:39 vm08.local ceph-mon[56824]: pgmap v1768: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:39 vm00.local ceph-mon[47668]: pgmap v1768: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:41 vm08.local ceph-mon[56824]: pgmap v1769: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:41 vm00.local ceph-mon[47668]: pgmap v1769: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:42.444 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:42.445 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:42.471 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:42.471 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:43.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:43 vm08.local ceph-mon[56824]: pgmap v1770: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:43 vm00.local ceph-mon[47668]: pgmap v1770: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:45 vm08.local ceph-mon[56824]: pgmap v1771: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:45 vm00.local ceph-mon[47668]: pgmap v1771: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:47.473 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:47.473 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:47.499 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:47.500 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:47 vm08.local ceph-mon[56824]: pgmap v1772: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:47 vm00.local ceph-mon[47668]: pgmap v1772: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:49.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:49 vm08.local ceph-mon[56824]: pgmap v1773: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:49 vm00.local ceph-mon[47668]: pgmap v1773: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:51 vm08.local ceph-mon[56824]: pgmap v1774: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:51 vm00.local ceph-mon[47668]: pgmap v1774: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:52.501 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:52.501 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:52.527 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:52.527 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:53 vm00.local ceph-mon[47668]: pgmap v1775: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:53 vm08.local ceph-mon[56824]: pgmap v1775: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:55 vm08.local ceph-mon[56824]: pgmap v1776: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:55 vm00.local ceph-mon[47668]: pgmap v1776: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:57.528 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:02:57.529 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:02:57.572 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:02:57.572 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:02:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:57 vm00.local ceph-mon[47668]: pgmap v1777: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:57 vm08.local ceph-mon[56824]: pgmap v1777: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:02:59.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:02:59 vm00.local ceph-mon[47668]: pgmap v1778: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:02:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:02:59 vm08.local ceph-mon[56824]: pgmap v1778: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:01.877 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:01 vm08.local ceph-mon[56824]: pgmap v1779: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:01 vm00.local ceph-mon[47668]: pgmap v1779: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:02.574 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:02.574 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:02.600 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:02.601 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:03.790 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:03 vm00.local ceph-mon[47668]: pgmap v1780: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:03 vm08.local ceph-mon[56824]: pgmap v1780: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:05.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:05 vm08.local ceph-mon[56824]: pgmap v1781: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:05 vm00.local ceph-mon[47668]: pgmap v1781: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:03:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:03:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:03:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:03:07.602 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:07.603 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:07.629 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:07.630 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:07 vm08.local ceph-mon[56824]: pgmap v1782: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:07.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:07 vm00.local ceph-mon[47668]: pgmap v1782: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:08.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:08 vm08.local ceph-mon[56824]: pgmap v1783: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:08.924 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:08 vm00.local ceph-mon[47668]: pgmap v1783: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:11.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:11 vm08.local ceph-mon[56824]: pgmap v1784: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:11 vm00.local ceph-mon[47668]: pgmap v1784: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:12.632 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:12.632 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:12.773 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:12.773 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:13.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:13 vm08.local ceph-mon[56824]: pgmap v1785: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:13.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:13 vm00.local ceph-mon[47668]: pgmap v1785: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:15 vm00.local ceph-mon[47668]: pgmap v1786: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:15 vm08.local ceph-mon[56824]: pgmap v1786: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:17.774 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:17.775 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:17.903 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:17.904 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:17 vm00.local ceph-mon[47668]: pgmap v1787: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:18.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:17 vm08.local ceph-mon[56824]: pgmap v1787: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:18.924 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:18 vm00.local ceph-mon[47668]: pgmap v1788: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:19.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:18 vm08.local ceph-mon[56824]: pgmap v1788: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:21 vm08.local ceph-mon[56824]: pgmap v1789: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:21 vm00.local ceph-mon[47668]: pgmap v1789: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:22.905 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:22.906 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:22.932 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:22.933 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:23.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:23 vm00.local ceph-mon[47668]: pgmap v1790: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:23 vm08.local ceph-mon[56824]: pgmap v1790: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:24 vm08.local ceph-mon[56824]: pgmap v1791: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:24 vm00.local ceph-mon[47668]: pgmap v1791: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:27 vm08.local ceph-mon[56824]: pgmap v1792: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:27.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:27 vm00.local ceph-mon[47668]: pgmap v1792: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:27.935 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:27.935 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:27.969 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:27.969 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:29.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:29 vm08.local ceph-mon[56824]: pgmap v1793: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:29.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:29 vm00.local ceph-mon[47668]: pgmap v1793: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:31 vm00.local ceph-mon[47668]: pgmap v1794: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:31.877 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:31 vm08.local ceph-mon[56824]: pgmap v1794: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:32.971 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:32.972 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:33.042 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:33.042 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:33.793 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:33 vm00.local ceph-mon[47668]: pgmap v1795: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:33 vm08.local ceph-mon[56824]: pgmap v1795: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:34.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:34 vm08.local ceph-mon[56824]: pgmap v1796: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:34.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:34 vm00.local ceph-mon[47668]: pgmap v1796: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:37 vm08.local ceph-mon[56824]: pgmap v1797: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:37 vm00.local ceph-mon[47668]: pgmap v1797: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:38.044 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:38.044 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:38.089 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:38.089 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:38.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:38.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:38.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:38.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:03:38.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:03:38.927 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:38 vm00.local ceph-mon[47668]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:38.927 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:38.927 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:38.927 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:03:38.927 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:03:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:39 vm08.local ceph-mon[56824]: pgmap v1798: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:39.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:39 vm00.local ceph-mon[47668]: pgmap v1798: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:40 vm08.local ceph-mon[56824]: pgmap v1799: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:40.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:40 vm00.local ceph-mon[47668]: pgmap v1799: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:43.091 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:43.091 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:43.118 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:43.119 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:43 vm08.local ceph-mon[56824]: pgmap v1800: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:43.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:43 vm00.local ceph-mon[47668]: pgmap v1800: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:45 vm00.local ceph-mon[47668]: pgmap v1801: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:45 vm08.local ceph-mon[56824]: pgmap v1801: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:47 vm08.local ceph-mon[56824]: pgmap v1802: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:47.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:47 vm00.local ceph-mon[47668]: pgmap v1802: 97 pgs: 97 
active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:48.120 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:48.120 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:48.161 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:48.162 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:48.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:48 vm08.local ceph-mon[56824]: pgmap v1803: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:48.928 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:48 vm00.local ceph-mon[47668]: pgmap v1803: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:51 vm08.local ceph-mon[56824]: pgmap v1804: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:51.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:51 vm00.local ceph-mon[47668]: pgmap v1804: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:53.165 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:53.165 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:53.245 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:53.245 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:53.794 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:53 vm00.local ceph-mon[47668]: pgmap v1805: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:53 vm08.local ceph-mon[56824]: pgmap v1805: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:54 vm08.local ceph-mon[56824]: pgmap v1806: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:54 vm00.local ceph-mon[47668]: pgmap v1806: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:57 vm08.local ceph-mon[56824]: pgmap v1807: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:57 vm00.local ceph-mon[47668]: pgmap v1807: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:03:58.247 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:03:58.247 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:03:58.314 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:03:58.315 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:03:59.877 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:03:59 vm08.local ceph-mon[56824]: pgmap v1808: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:03:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:03:59 vm00.local ceph-mon[47668]: pgmap v1808: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:01.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:01 vm08.local ceph-mon[56824]: pgmap v1809: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:01 vm00.local ceph-mon[47668]: pgmap v1809: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:03.317 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:03.317 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:03.345 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:03.346 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:03.796 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:03 vm00.local ceph-mon[47668]: pgmap v1810: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:03 vm08.local ceph-mon[56824]: pgmap v1810: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:04.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:04 vm08.local ceph-mon[56824]: pgmap v1811: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:04.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:04 vm00.local ceph-mon[47668]: pgmap v1811: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:04:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:04:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:04:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:04:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:06 vm08.local ceph-mon[56824]: pgmap v1812: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:06 vm00.local ceph-mon[47668]: pgmap v1812: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:08.347 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:08.348 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:08.409 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:08.409 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:08.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:08 vm08.local ceph-mon[56824]: pgmap v1813: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:08.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:08 vm00.local ceph-mon[47668]: pgmap v1813: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:11 vm08.local ceph-mon[56824]: pgmap v1814: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:11 vm00.local ceph-mon[47668]: pgmap v1814: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:13.411 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:13.411 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:13.450 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:13.451 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:13.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:13 vm00.local ceph-mon[47668]: pgmap v1815: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:13 vm08.local ceph-mon[56824]: pgmap v1815: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:15 vm08.local ceph-mon[56824]: pgmap v1816: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:15.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:15 vm00.local ceph-mon[47668]: pgmap v1816: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:16.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:16 vm08.local ceph-mon[56824]: pgmap v1817: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:16.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:16 vm00.local ceph-mon[47668]: pgmap v1817: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:18.453 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:18.453 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:18.481 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:18.482 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:18.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:18 vm08.local ceph-mon[56824]: pgmap v1818: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:18.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:18 vm00.local ceph-mon[47668]: pgmap v1818: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:21.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:21 vm08.local ceph-mon[56824]: pgmap v1819: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:21 vm00.local ceph-mon[47668]: pgmap v1819: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:23 vm00.local ceph-mon[47668]: pgmap v1820: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:23.483 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:23.484 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:23.512 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:23.513 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:23 vm08.local ceph-mon[56824]: pgmap v1820: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:25 vm08.local ceph-mon[56824]: pgmap v1821: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:25 vm00.local ceph-mon[47668]: pgmap v1821: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:27 vm08.local ceph-mon[56824]: pgmap v1822: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:27 vm00.local ceph-mon[47668]: pgmap v1822: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:28.514 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:28.515 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:28.541 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:28.541 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:28.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:28 vm08.local ceph-mon[56824]: pgmap v1823: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:28.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:28 vm00.local ceph-mon[47668]: pgmap 
v1823: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:31 vm08.local ceph-mon[56824]: pgmap v1824: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:31 vm00.local ceph-mon[47668]: pgmap v1824: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:33 vm00.local ceph-mon[47668]: pgmap v1825: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:33.542 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:33.543 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:33.570 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:33.571 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:33 vm08.local ceph-mon[56824]: pgmap v1825: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:35 vm08.local ceph-mon[56824]: pgmap v1826: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:35 vm00.local ceph-mon[47668]: pgmap v1826: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:37 vm08.local ceph-mon[56824]: pgmap v1827: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:37 vm00.local ceph-mon[47668]: pgmap v1827: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:38.572 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:38.573 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:38.611 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:38.612 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:38 vm08.local ceph-mon[56824]: pgmap v1828: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:04:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:04:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:38 vm08.local ceph-mon[56824]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:04:38.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:38 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:04:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:38 vm00.local ceph-mon[47668]: pgmap v1828: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:04:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:04:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:04:38.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:38 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:04:40.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:04:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:04:40.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:04:40.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:04:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:40 vm08.local ceph-mon[56824]: pgmap v1829: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:40 vm00.local ceph-mon[47668]: pgmap v1829: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:43.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:43 vm00.local ceph-mon[47668]: pgmap v1830: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:43.613 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:43.614 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:43 vm08.local ceph-mon[56824]: pgmap v1830: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:43.643 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:43.643 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:45 vm08.local 
ceph-mon[56824]: pgmap v1831: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:45 vm00.local ceph-mon[47668]: pgmap v1831: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:47 vm08.local ceph-mon[56824]: pgmap v1832: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:47 vm00.local ceph-mon[47668]: pgmap v1832: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:48.645 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:48.645 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:48.671 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:48.671 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:48.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:48 vm08.local ceph-mon[56824]: pgmap v1833: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:48.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:48 vm00.local ceph-mon[47668]: pgmap v1833: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:51 vm08.local ceph-mon[56824]: pgmap v1834: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:51 vm00.local ceph-mon[47668]: pgmap v1834: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:53 vm00.local ceph-mon[47668]: pgmap v1835: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:53 vm08.local ceph-mon[56824]: pgmap v1835: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:53.672 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:53.673 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:53.698 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:53.699 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:55 vm08.local ceph-mon[56824]: pgmap v1836: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:55 vm00.local ceph-mon[47668]: pgmap v1836: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:57 vm08.local ceph-mon[56824]: pgmap v1837: 97 pgs: 97 
active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:57 vm00.local ceph-mon[47668]: pgmap v1837: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:04:58.700 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:04:58.700 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:04:58.725 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:04:58.726 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:04:58.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:04:58 vm08.local ceph-mon[56824]: pgmap v1838: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:04:58.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:04:58 vm00.local ceph-mon[47668]: pgmap v1838: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:01 vm08.local ceph-mon[56824]: pgmap v1839: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:01 vm00.local ceph-mon[47668]: pgmap v1839: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:03 vm00.local ceph-mon[47668]: pgmap v1840: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:03 vm08.local ceph-mon[56824]: pgmap v1840: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:03.727 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:03.727 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:03.754 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:03.754 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:05 vm08.local ceph-mon[56824]: pgmap v1841: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:05 vm00.local ceph-mon[47668]: pgmap v1841: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:05:06.478 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:05:06.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:05:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:05:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:07 vm08.local ceph-mon[56824]: pgmap v1842: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:07 vm00.local ceph-mon[47668]: pgmap v1842: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:08.756 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:08.756 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:08.785 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:08.785 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:08.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:08 vm08.local ceph-mon[56824]: pgmap v1843: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:08.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:08 vm00.local ceph-mon[47668]: pgmap v1843: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:11 vm08.local ceph-mon[56824]: pgmap v1844: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:11 vm00.local ceph-mon[47668]: pgmap v1844: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:13.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:13 vm00.local ceph-mon[47668]: pgmap v1845: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:13.786 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:13.787 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:13.857 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:13.858 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:13 vm08.local ceph-mon[56824]: pgmap v1845: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:15 vm00.local ceph-mon[47668]: pgmap v1846: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:15 vm08.local ceph-mon[56824]: pgmap v1846: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 
160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:17 vm00.local ceph-mon[47668]: pgmap v1847: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:17 vm08.local ceph-mon[56824]: pgmap v1847: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:18.859 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:18.859 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:18.885 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:18.886 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:18.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:18 vm00.local ceph-mon[47668]: pgmap v1848: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:19.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:18 vm08.local ceph-mon[56824]: pgmap v1848: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:21.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:21 vm08.local ceph-mon[56824]: pgmap v1849: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:21 vm00.local ceph-mon[47668]: pgmap v1849: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:23 vm00.local ceph-mon[47668]: pgmap v1850: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:23 vm08.local ceph-mon[56824]: pgmap v1850: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:23.887 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:23.888 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:23.921 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:23.921 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:25 vm08.local ceph-mon[56824]: pgmap v1851: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:25 vm00.local ceph-mon[47668]: pgmap v1851: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:27 vm08.local ceph-mon[56824]: pgmap v1852: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:27 vm00.local ceph-mon[47668]: pgmap v1852: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 
B/s wr, 0 op/s 2026-03-09T00:05:28.923 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:28.924 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:28.950 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:28.951 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:28 vm08.local ceph-mon[56824]: pgmap v1853: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:28 vm00.local ceph-mon[47668]: pgmap v1853: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:31 vm08.local ceph-mon[56824]: pgmap v1854: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:31 vm00.local ceph-mon[47668]: pgmap v1854: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:33 vm00.local ceph-mon[47668]: pgmap v1855: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:33.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:33 vm08.local ceph-mon[56824]: pgmap v1855: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:33.953 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:33.953 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:33.979 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:33.979 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:35.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:35 vm08.local ceph-mon[56824]: pgmap v1856: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:35.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:35 vm00.local ceph-mon[47668]: pgmap v1856: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:37 vm08.local ceph-mon[56824]: pgmap v1857: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:37 vm00.local ceph-mon[47668]: pgmap v1857: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:38.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:38 vm00.local ceph-mon[47668]: pgmap v1858: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:38.980 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:38.981 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:38.991 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:38 vm08.local ceph-mon[56824]: pgmap v1858: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:39.017 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:39.018 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:05:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:05:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:05:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:40.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:05:40.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:05:40.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:05:40.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:40.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:40.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:40.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:40.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:39 vm00.local 
ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:05:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:40 vm08.local ceph-mon[56824]: pgmap v1859: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:40 vm00.local ceph-mon[47668]: pgmap v1859: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:43.613 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:43 vm00.local ceph-mon[47668]: pgmap v1860: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:43 vm08.local ceph-mon[56824]: pgmap v1860: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:44.020 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:44.021 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:44.048 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:44.049 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:45 vm08.local ceph-mon[56824]: pgmap v1861: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:45 vm00.local ceph-mon[47668]: pgmap v1861: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:47 vm08.local ceph-mon[56824]: pgmap v1862: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:47 vm00.local ceph-mon[47668]: pgmap v1862: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:48.938 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:48 vm00.local ceph-mon[47668]: pgmap v1863: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:49.050 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:49.051 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:49.078 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:49.078 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:48 vm08.local ceph-mon[56824]: pgmap v1863: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:51.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:51 vm08.local ceph-mon[56824]: pgmap v1864: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:51 vm00.local ceph-mon[47668]: pgmap v1864: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 
160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:53.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:53 vm00.local ceph-mon[47668]: pgmap v1865: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:53 vm08.local ceph-mon[56824]: pgmap v1865: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:54.080 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:54.080 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:54.105 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:54.106 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:55 vm08.local ceph-mon[56824]: pgmap v1866: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:55 vm00.local ceph-mon[47668]: pgmap v1866: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:57 vm08.local ceph-mon[56824]: pgmap v1867: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:57 vm00.local ceph-mon[47668]: pgmap v1867: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:05:58.938 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:05:58 vm00.local ceph-mon[47668]: pgmap v1868: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:59.107 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:05:59.108 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:05:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:05:58 vm08.local ceph-mon[56824]: pgmap v1868: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:05:59.134 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:05:59.134 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:05:59.894 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 09 00:05:59 vm00.local sudo[92297]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-09T00:05:59.894 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 09 00:05:59 vm00.local sudo[92297]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:05:59.894 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 09 00:05:59 vm00.local sudo[92297]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:05:59.894 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 09 00:05:59 vm00.local sudo[92294]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-09T00:05:59.894 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 09 00:05:59 vm00.local sudo[92294]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:05:59.894 
INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 09 00:05:59 vm00.local sudo[92294]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:06:00.180 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 09 00:06:00 vm00.local sudo[92303]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdb 2026-03-09T00:06:00.180 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 09 00:06:00 vm00.local sudo[92303]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:06:00.180 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 09 00:06:00 vm00.local sudo[92303]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:06:00.180 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 09 00:05:59 vm00.local sudo[92300]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-09T00:06:00.180 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 09 00:05:59 vm00.local sudo[92300]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:06:00.180 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 09 00:05:59 vm00.local sudo[92300]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:06:00.606 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78213]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-09T00:06:00.606 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78213]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:06:00.606 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78213]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:06:00.606 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78216]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-09T00:06:00.606 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78216]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:06:00.606 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78216]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:06:00.877 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78222]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdb 2026-03-09T00:06:00.877 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78222]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:06:00.877 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78222]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:06:00.877 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78219]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-09T00:06:00.877 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78219]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:06:00.877 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 09 00:06:00 vm08.local sudo[78219]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:06:01.222 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:00 vm00.local sudo[92306]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T00:06:01.222 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:00 vm00.local sudo[92306]: pam_unix(sudo:session): session opened for user root by 
(uid=0) 2026-03-09T00:06:01.222 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:00 vm00.local sudo[92306]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:06:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local sudo[78225]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local sudo[78225]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local sudo[78225]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local ceph-mon[56824]: pgmap v1869: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local ceph-mon[56824]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local ceph-mon[56824]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local ceph-mon[56824]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-09T00:06:01.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:01 vm08.local ceph-mon[56824]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:06:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:01 vm00.local ceph-mon[47668]: pgmap v1869: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:01 vm00.local ceph-mon[47668]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:06:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:01 vm00.local ceph-mon[47668]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:06:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:01 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-09T00:06:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:01 vm00.local ceph-mon[47668]: 
from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-09T00:06:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:01 vm00.local ceph-mon[47668]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:06:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:01 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm00"}]: dispatch 2026-03-09T00:06:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:01 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-09T00:06:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:01 vm00.local ceph-mon[47668]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:06:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:03 vm00.local ceph-mon[47668]: pgmap v1870: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:03 vm08.local ceph-mon[56824]: pgmap v1870: 97 pgs: 97 active+clean; 453 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:04.136 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:04.136 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:04.163 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:04.163 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:05 vm08.local ceph-mon[56824]: pgmap v1871: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:05 vm00.local ceph-mon[47668]: pgmap v1871: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:06:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:06:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:06:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:06:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:07 vm08.local ceph-mon[56824]: pgmap v1872: 
97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:07 vm00.local ceph-mon[47668]: pgmap v1872: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:08.940 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:08 vm00.local ceph-mon[47668]: pgmap v1873: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:08 vm08.local ceph-mon[56824]: pgmap v1873: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:09.164 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:09.165 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:09.191 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:09.192 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:11 vm08.local ceph-mon[56824]: pgmap v1874: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:11 vm00.local ceph-mon[47668]: pgmap v1874: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:13 vm00.local ceph-mon[47668]: pgmap v1875: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:13 vm08.local ceph-mon[56824]: pgmap v1875: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:14.193 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:14.193 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:14.220 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:14.221 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:15 vm08.local ceph-mon[56824]: pgmap v1876: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:15 vm00.local ceph-mon[47668]: pgmap v1876: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:17 vm08.local ceph-mon[56824]: pgmap v1877: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:17 vm00.local ceph-mon[47668]: pgmap v1877: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 22 KiB/s wr, 2 op/s 2026-03-09T00:06:18.940 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:18 vm00.local ceph-mon[47668]: pgmap v1878: 97 pgs: 97 active+clean; 
453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:18 vm08.local ceph-mon[56824]: pgmap v1878: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:19.222 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:19.223 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:19.251 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:19.251 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:21 vm08.local ceph-mon[56824]: pgmap v1879: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:21 vm00.local ceph-mon[47668]: pgmap v1879: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:23 vm00.local ceph-mon[47668]: pgmap v1880: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:23 vm08.local ceph-mon[56824]: pgmap v1880: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:24.253 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:24.253 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:24.281 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:24.282 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:25 vm08.local ceph-mon[56824]: pgmap v1881: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:25 vm00.local ceph-mon[47668]: pgmap v1881: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:27 vm08.local ceph-mon[56824]: pgmap v1882: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:27 vm00.local ceph-mon[47668]: pgmap v1882: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:28.940 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:28 vm00.local ceph-mon[47668]: pgmap v1883: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:28 vm08.local ceph-mon[56824]: pgmap v1883: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:29.283 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:29.284 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o 
sync 2026-03-09T00:06:29.309 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:29.310 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:31 vm08.local ceph-mon[56824]: pgmap v1884: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:31 vm00.local ceph-mon[47668]: pgmap v1884: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:33 vm00.local ceph-mon[47668]: pgmap v1885: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:33 vm08.local ceph-mon[56824]: pgmap v1885: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:34.311 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:34.312 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:34.336 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:34.337 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:35 vm08.local ceph-mon[56824]: pgmap v1886: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:35 vm00.local ceph-mon[47668]: pgmap v1886: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:37 vm08.local ceph-mon[56824]: pgmap v1887: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:37 vm00.local ceph-mon[47668]: pgmap v1887: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:38 vm08.local ceph-mon[56824]: pgmap v1888: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:38 vm00.local ceph-mon[47668]: pgmap v1888: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:39.338 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:39.339 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:39.364 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:39.365 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:39.872 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:06:39.872 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 
00:06:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:39.872 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:39 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:06:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:39 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:06:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:40 vm08.local ceph-mon[56824]: pgmap v1889: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:40 vm00.local ceph-mon[47668]: pgmap v1889: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:42.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:41 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:06:42.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:41 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:06:42.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:41 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:06:42.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:41 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:06:42.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:41 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:06:42.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:41 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:06:42.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:41 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:06:42.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:41 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:06:43.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:42 vm00.local ceph-mon[47668]: pgmap v1890: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:42 vm08.local 
ceph-mon[56824]: pgmap v1890: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:44.366 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:44.367 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:44.394 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:44.394 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:45 vm08.local ceph-mon[56824]: pgmap v1891: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:45 vm00.local ceph-mon[47668]: pgmap v1891: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:47 vm08.local ceph-mon[56824]: pgmap v1892: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:47 vm00.local ceph-mon[47668]: pgmap v1892: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:48 vm08.local ceph-mon[56824]: pgmap v1893: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:48 vm00.local ceph-mon[47668]: pgmap v1893: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:49.395 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:49.396 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:49.424 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:49.424 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:06:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:51 vm08.local ceph-mon[56824]: pgmap v1894: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:51 vm00.local ceph-mon[47668]: pgmap v1894: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:53 vm00.local ceph-mon[47668]: pgmap v1895: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:53 vm08.local ceph-mon[56824]: pgmap v1895: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:54.426 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:54.426 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:54.452 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:54.453 INFO:teuthology.orchestra.run.vm00.stderr:+ 
sleep 5 2026-03-09T00:06:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:55 vm08.local ceph-mon[56824]: pgmap v1896: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:55 vm00.local ceph-mon[47668]: pgmap v1896: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:57 vm08.local ceph-mon[56824]: pgmap v1897: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:57 vm00.local ceph-mon[47668]: pgmap v1897: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:06:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:06:58 vm08.local ceph-mon[56824]: pgmap v1898: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:06:58 vm00.local ceph-mon[47668]: pgmap v1898: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:06:59.454 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:06:59.454 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:06:59.482 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:06:59.482 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:01 vm08.local ceph-mon[56824]: pgmap v1899: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:01 vm00.local ceph-mon[47668]: pgmap v1899: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:03 vm00.local ceph-mon[47668]: pgmap v1900: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:03 vm08.local ceph-mon[56824]: pgmap v1900: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:04.483 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:04.484 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:04.511 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:04.511 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:05 vm08.local ceph-mon[56824]: pgmap v1901: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:05 vm00.local ceph-mon[47668]: pgmap v1901: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:06.529 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:07:06.529 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:07:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:07:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:07:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:07 vm08.local ceph-mon[56824]: pgmap v1902: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:07 vm00.local ceph-mon[47668]: pgmap v1902: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:08 vm08.local ceph-mon[56824]: pgmap v1903: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:08 vm00.local ceph-mon[47668]: pgmap v1903: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:09.513 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:09.513 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:09.540 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:09.541 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:11 vm08.local ceph-mon[56824]: pgmap v1904: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:11 vm00.local ceph-mon[47668]: pgmap v1904: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:13 vm00.local ceph-mon[47668]: pgmap v1905: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:13 vm08.local ceph-mon[56824]: pgmap v1905: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:14.542 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:14.543 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-09T00:07:14.569 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:14.570 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:15 vm08.local ceph-mon[56824]: pgmap v1906: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:15 vm00.local ceph-mon[47668]: pgmap v1906: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:17 vm08.local ceph-mon[56824]: pgmap v1907: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:17 vm00.local ceph-mon[47668]: pgmap v1907: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:18 vm08.local ceph-mon[56824]: pgmap v1908: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:18 vm00.local ceph-mon[47668]: pgmap v1908: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:19.571 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:19.572 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:19.597 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:19.598 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:21 vm08.local ceph-mon[56824]: pgmap v1909: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:21 vm00.local ceph-mon[47668]: pgmap v1909: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:23 vm00.local ceph-mon[47668]: pgmap v1910: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:23 vm08.local ceph-mon[56824]: pgmap v1910: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:24.599 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:24.599 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:24.626 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:24.626 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:25 vm08.local ceph-mon[56824]: pgmap v1911: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:25 vm00.local 
ceph-mon[47668]: pgmap v1911: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:27 vm08.local ceph-mon[56824]: pgmap v1912: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:27 vm00.local ceph-mon[47668]: pgmap v1912: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:28 vm08.local ceph-mon[56824]: pgmap v1913: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:28 vm00.local ceph-mon[47668]: pgmap v1913: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:29.627 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:29.628 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:29.655 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:29.655 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:31 vm08.local ceph-mon[56824]: pgmap v1914: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:31 vm00.local ceph-mon[47668]: pgmap v1914: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:33 vm00.local ceph-mon[47668]: pgmap v1915: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:33 vm08.local ceph-mon[56824]: pgmap v1915: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:34.656 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:34.657 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:34.684 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:34.685 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:35 vm08.local ceph-mon[56824]: pgmap v1916: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:35 vm00.local ceph-mon[47668]: pgmap v1916: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:37 vm08.local ceph-mon[56824]: pgmap v1917: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:37 vm00.local ceph-mon[47668]: pgmap v1917: 97 pgs: 97 
active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:38 vm08.local ceph-mon[56824]: pgmap v1918: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:38 vm00.local ceph-mon[47668]: pgmap v1918: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:39.686 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:39.686 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:39.714 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:39.714 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:41 vm00.local ceph-mon[47668]: pgmap v1919: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:41 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:07:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:41 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:07:41.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:41 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:07:41.590 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:41 vm08.local ceph-mon[56824]: pgmap v1919: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:41.590 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:41 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:07:41.590 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:41 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:07:41.590 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:41 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:07:42.666 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:42 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:42.666 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:42 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:42.666 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:42 vm00.local ceph-mon[47668]: pgmap v1920: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:42.910 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:42 vm08.local ceph-mon[56824]: 
from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:42.910 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:42 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:42.910 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:42 vm08.local ceph-mon[56824]: pgmap v1920: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:44.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:43 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:44.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:43 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:07:44.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:43 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:44.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:43 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:44.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:43 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:44.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:43 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:07:44.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:43 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:44.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:43 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:07:44.715 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:44.716 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:44.742 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:44.743 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:45 vm08.local ceph-mon[56824]: pgmap v1921: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:45.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:45 vm00.local ceph-mon[47668]: pgmap v1921: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:47 vm08.local ceph-mon[56824]: pgmap v1922: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:47 vm00.local ceph-mon[47668]: pgmap v1922: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:48 vm08.local ceph-mon[56824]: pgmap v1923: 97 pgs: 97 active+clean; 453 
KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:48 vm00.local ceph-mon[47668]: pgmap v1923: 97 pgs: 97 active+clean; 453 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:49.744 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:49.745 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:49.772 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:49.772 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:51 vm08.local ceph-mon[56824]: pgmap v1924: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:51 vm00.local ceph-mon[47668]: pgmap v1924: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:53 vm00.local ceph-mon[47668]: pgmap v1925: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:53 vm08.local ceph-mon[56824]: pgmap v1925: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:54.774 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:54.774 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:07:54.801 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:54.801 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:07:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:55 vm08.local ceph-mon[56824]: pgmap v1926: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:55 vm00.local ceph-mon[47668]: pgmap v1926: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:57 vm08.local ceph-mon[56824]: pgmap v1927: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:57 vm00.local ceph-mon[47668]: pgmap v1927: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:07:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:07:58 vm08.local ceph-mon[56824]: pgmap v1928: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:59.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:07:58 vm00.local ceph-mon[47668]: pgmap v1928: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:07:59.803 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:07:59.803 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-09T00:07:59.830 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:07:59.831 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:01 vm08.local ceph-mon[56824]: pgmap v1929: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:01 vm00.local ceph-mon[47668]: pgmap v1929: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:03.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:03 vm00.local ceph-mon[47668]: pgmap v1930: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:03.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:03 vm08.local ceph-mon[56824]: pgmap v1930: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:04.832 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:04.833 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:04.860 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:04.861 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:05.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:05 vm08.local ceph-mon[56824]: pgmap v1931: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:05 vm00.local ceph-mon[47668]: pgmap v1931: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:06.529 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:08:06.529 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:08:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:08:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:08:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:07 vm08.local ceph-mon[56824]: pgmap v1932: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:07 vm00.local ceph-mon[47668]: pgmap v1932: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 
0 op/s 2026-03-09T00:08:09.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:08 vm08.local ceph-mon[56824]: pgmap v1933: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:08 vm00.local ceph-mon[47668]: pgmap v1933: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:09.863 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:09.863 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:09.891 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:09.892 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:11 vm08.local ceph-mon[56824]: pgmap v1934: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:11 vm00.local ceph-mon[47668]: pgmap v1934: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:13.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:13 vm00.local ceph-mon[47668]: pgmap v1935: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:13 vm08.local ceph-mon[56824]: pgmap v1935: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:14.893 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:14.894 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:14.922 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:14.923 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:15 vm08.local ceph-mon[56824]: pgmap v1936: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:15 vm00.local ceph-mon[47668]: pgmap v1936: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:17.629 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:17 vm08.local ceph-mon[56824]: pgmap v1937: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:17 vm00.local ceph-mon[47668]: pgmap v1937: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:18 vm08.local ceph-mon[56824]: pgmap v1938: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:18 vm00.local ceph-mon[47668]: pgmap v1938: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:19.924 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:19.925 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:19.953 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:19.953 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:21.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:21 vm08.local ceph-mon[56824]: pgmap v1939: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:21 vm00.local ceph-mon[47668]: pgmap v1939: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:23.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:23 vm00.local ceph-mon[47668]: pgmap v1940: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:23.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:23 vm08.local ceph-mon[56824]: pgmap v1940: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:24.954 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:24.955 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:24.981 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:24.982 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:25 vm08.local ceph-mon[56824]: pgmap v1941: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:25 vm00.local ceph-mon[47668]: pgmap v1941: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:27.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:27 vm08.local ceph-mon[56824]: pgmap v1942: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:27 vm00.local ceph-mon[47668]: pgmap v1942: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:28 vm08.local ceph-mon[56824]: pgmap v1943: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:29.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:28 vm00.local ceph-mon[47668]: pgmap v1943: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:29.983 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:29.983 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:30.013 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:30.014 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:31.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:31 vm08.local ceph-mon[56824]: pgmap v1944: 97 pgs: 97 active+clean; 453 KiB 
data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:31.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:31 vm00.local ceph-mon[47668]: pgmap v1944: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:33.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:33 vm00.local ceph-mon[47668]: pgmap v1945: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:33 vm08.local ceph-mon[56824]: pgmap v1945: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:35.015 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:35.016 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:35.045 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:35.046 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:35.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:35 vm08.local ceph-mon[56824]: pgmap v1946: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:35 vm00.local ceph-mon[47668]: pgmap v1946: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:37 vm08.local ceph-mon[56824]: pgmap v1947: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:37 vm00.local ceph-mon[47668]: pgmap v1947: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:38 vm08.local ceph-mon[56824]: pgmap v1948: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:38 vm00.local ceph-mon[47668]: pgmap v1948: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:40.047 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:40.048 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:40.080 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:40.080 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:41 vm08.local ceph-mon[56824]: pgmap v1949: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:41 vm00.local ceph-mon[47668]: pgmap v1949: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:43.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:43 vm08.local ceph-mon[56824]: pgmap v1950: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:43.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:43 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:08:43.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:43 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:08:43.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:43 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:08:43.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:43 vm00.local ceph-mon[47668]: pgmap v1950: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:43.432 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:43 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:08:43.432 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:43 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:08:43.432 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:43 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:08:44.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:08:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:08:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:08:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:08:45.081 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:45.082 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:45.108 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:45.109 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:45 vm08.local ceph-mon[56824]: pgmap v1951: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:45 vm00.local ceph-mon[47668]: pgmap v1951: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:47 vm08.local ceph-mon[56824]: pgmap v1952: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:47.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:47 vm00.local ceph-mon[47668]: pgmap v1952: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:48 vm08.local ceph-mon[56824]: pgmap v1953: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:49.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:48 vm00.local ceph-mon[47668]: pgmap v1953: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:50.110 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:50.110 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:50.137 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:50.137 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:51.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:51 vm08.local ceph-mon[56824]: pgmap v1954: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:51 vm00.local ceph-mon[47668]: pgmap v1954: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:53.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:53 vm00.local ceph-mon[47668]: pgmap v1955: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:53.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:53 vm08.local ceph-mon[56824]: pgmap v1955: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:55.139 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:08:55.139 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:08:55.167 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:08:55.168 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:08:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:55 vm08.local ceph-mon[56824]: pgmap v1956: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:55 vm00.local ceph-mon[47668]: pgmap v1956: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:57.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:57 vm08.local ceph-mon[56824]: pgmap v1957: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:57.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:57 vm00.local ceph-mon[47668]: pgmap v1957: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:08:59.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:08:58 vm08.local ceph-mon[56824]: pgmap v1958: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:08:59.179 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:08:58 vm00.local ceph-mon[47668]: pgmap v1958: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:00.169 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:00.170 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:00.197 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:00.197 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:01 vm08.local ceph-mon[56824]: pgmap v1959: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:01 vm00.local ceph-mon[47668]: pgmap v1959: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:03.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:03 vm00.local ceph-mon[47668]: pgmap v1960: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:03 vm08.local ceph-mon[56824]: pgmap v1960: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:05.199 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:05.199 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:05.227 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:05.228 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:05.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:05 vm08.local ceph-mon[56824]: pgmap v1961: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:05 vm00.local ceph-mon[47668]: pgmap v1961: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:09:06.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:09:06.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:09:06.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:09:07.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:07 vm00.local ceph-mon[47668]: pgmap v1962: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:07 vm08.local ceph-mon[56824]: pgmap v1962: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:09.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:08 vm08.local ceph-mon[56824]: pgmap v1963: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:08 vm00.local ceph-mon[47668]: pgmap v1963: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:10.230 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:10.230 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:10.260 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:10.260 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:11.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:11 vm08.local ceph-mon[56824]: pgmap v1964: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:11 vm00.local ceph-mon[47668]: pgmap v1964: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:13.431 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:13 vm00.local ceph-mon[47668]: pgmap v1965: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:13 vm08.local ceph-mon[56824]: pgmap v1965: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:15.262 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:15.262 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:15.423 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:15.423 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:15.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:15 vm00.local ceph-mon[47668]: pgmap v1966: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:15.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:15 vm08.local ceph-mon[56824]: pgmap v1966: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:17 vm00.local ceph-mon[47668]: pgmap v1967: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:17 vm08.local ceph-mon[56824]: pgmap v1967: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:19.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:18 vm08.local ceph-mon[56824]: pgmap v1968: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:18 vm00.local ceph-mon[47668]: pgmap v1968: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:20.425 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:20.425 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:20.453 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:20.453 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:21 vm08.local ceph-mon[56824]: pgmap v1969: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:21 vm00.local ceph-mon[47668]: pgmap v1969: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:23 vm00.local ceph-mon[47668]: pgmap v1970: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:23 vm08.local ceph-mon[56824]: pgmap v1970: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:25.455 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:25.455 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:25.481 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:25.481 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:25 vm08.local ceph-mon[56824]: pgmap v1971: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:09:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:25 vm00.local ceph-mon[47668]: pgmap v1971: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:09:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:27 vm08.local ceph-mon[56824]: pgmap v1972: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:27 vm00.local ceph-mon[47668]: pgmap v1972: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:29.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:28 vm08.local ceph-mon[56824]: pgmap v1973: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:09:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:28 vm00.local ceph-mon[47668]: pgmap v1973: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:09:30.483 INFO:teuthology.orchestra.run.vm00.stderr:++ 
hostname 2026-03-09T00:09:30.483 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:30.615 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:30.615 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:31 vm08.local ceph-mon[56824]: pgmap v1974: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:09:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:31 vm00.local ceph-mon[47668]: pgmap v1974: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:09:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:33 vm00.local ceph-mon[47668]: pgmap v1975: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:33 vm08.local ceph-mon[56824]: pgmap v1975: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:35.616 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:35.617 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:35 vm08.local ceph-mon[56824]: pgmap v1976: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:35.644 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:35.644 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:35.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:35 vm00.local ceph-mon[47668]: pgmap v1976: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:37 vm08.local ceph-mon[56824]: pgmap v1977: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:37.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:37 vm00.local ceph-mon[47668]: pgmap v1977: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:38 vm08.local ceph-mon[56824]: pgmap v1978: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:39.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:38 vm00.local ceph-mon[47668]: pgmap v1978: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:40.646 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:40.646 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:40.672 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:40.673 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:41.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:41 vm08.local ceph-mon[56824]: pgmap v1979: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 
170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:41 vm00.local ceph-mon[47668]: pgmap v1979: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:43 vm08.local ceph-mon[56824]: pgmap v1980: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:43.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:43 vm00.local ceph-mon[47668]: pgmap v1980: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:09:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:09:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:09:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:09:45.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:45 vm08.local ceph-mon[56824]: pgmap v1981: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:45.675 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:45.675 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:45 vm00.local ceph-mon[47668]: pgmap v1981: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 
2026-03-09T00:09:45.701 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:45.701 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:47 vm08.local ceph-mon[56824]: pgmap v1982: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:47 vm00.local ceph-mon[47668]: pgmap v1982: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:48 vm08.local ceph-mon[56824]: pgmap v1983: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:49.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:48 vm00.local ceph-mon[47668]: pgmap v1983: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:50.703 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:50.703 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:50.731 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:50.731 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:51 vm08.local ceph-mon[56824]: pgmap v1984: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:51 vm00.local ceph-mon[47668]: pgmap v1984: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:53 vm00.local ceph-mon[47668]: pgmap v1985: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:53.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:53 vm08.local ceph-mon[56824]: pgmap v1985: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:55.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:55 vm08.local ceph-mon[56824]: pgmap v1986: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:55 vm00.local ceph-mon[47668]: pgmap v1986: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:55.733 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:09:55.734 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:09:55.764 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:09:55.765 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:09:57.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:57 vm08.local ceph-mon[56824]: pgmap v1987: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:57 vm00.local 
ceph-mon[47668]: pgmap v1987: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:09:59.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:09:58 vm08.local ceph-mon[56824]: pgmap v1988: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:09:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:09:58 vm00.local ceph-mon[47668]: pgmap v1988: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:00.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:00 vm08.local ceph-mon[56824]: overall HEALTH_OK 2026-03-09T00:10:00.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:00 vm00.local ceph-mon[47668]: overall HEALTH_OK 2026-03-09T00:10:00.767 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:00.767 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:00.797 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:00.798 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:01.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:01 vm08.local ceph-mon[56824]: pgmap v1989: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:01 vm00.local ceph-mon[47668]: pgmap v1989: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:03.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:03 vm00.local ceph-mon[47668]: pgmap v1990: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:03.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:03 vm08.local ceph-mon[56824]: pgmap v1990: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:05.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:05 vm08.local ceph-mon[56824]: pgmap v1991: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:05 vm00.local ceph-mon[47668]: pgmap v1991: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:05.799 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:05.799 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:05.825 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:05.826 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:10:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:10:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:10:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:10:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:07 vm08.local ceph-mon[56824]: pgmap v1992: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:07 vm00.local ceph-mon[47668]: pgmap v1992: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:09.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:08 vm08.local ceph-mon[56824]: pgmap v1993: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:09.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:08 vm00.local ceph-mon[47668]: pgmap v1993: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:10.827 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:10.828 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:10.855 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:10.855 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:11 vm08.local ceph-mon[56824]: pgmap v1994: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:11 vm00.local ceph-mon[47668]: pgmap v1994: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:13 vm00.local ceph-mon[47668]: pgmap v1995: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:13 vm08.local ceph-mon[56824]: pgmap v1995: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:15 vm08.local ceph-mon[56824]: pgmap v1996: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:15.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:15 vm00.local ceph-mon[47668]: pgmap v1996: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:15.857 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:15.859 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-09T00:10:15.885 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:15.885 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:17.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:17 vm08.local ceph-mon[56824]: pgmap v1997: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:17 vm00.local ceph-mon[47668]: pgmap v1997: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:19.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:18 vm08.local ceph-mon[56824]: pgmap v1998: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:19.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:18 vm00.local ceph-mon[47668]: pgmap v1998: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:20.886 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:20.887 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:20.913 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:20.913 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:21 vm08.local ceph-mon[56824]: pgmap v1999: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:21 vm00.local ceph-mon[47668]: pgmap v1999: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:23 vm00.local ceph-mon[47668]: pgmap v2000: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:23 vm08.local ceph-mon[56824]: pgmap v2000: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:25 vm08.local ceph-mon[56824]: pgmap v2001: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:25 vm00.local ceph-mon[47668]: pgmap v2001: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:25.914 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:25.915 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:25.941 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:25.941 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:27 vm08.local ceph-mon[56824]: pgmap v2002: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:27 vm00.local 
ceph-mon[47668]: pgmap v2002: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:29.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:28 vm08.local ceph-mon[56824]: pgmap v2003: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:28 vm00.local ceph-mon[47668]: pgmap v2003: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:30.943 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:30.943 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:30.970 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:30.971 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:31 vm08.local ceph-mon[56824]: pgmap v2004: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:31 vm00.local ceph-mon[47668]: pgmap v2004: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:33 vm00.local ceph-mon[47668]: pgmap v2005: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:33 vm08.local ceph-mon[56824]: pgmap v2005: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:35 vm08.local ceph-mon[56824]: pgmap v2006: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:35 vm00.local ceph-mon[47668]: pgmap v2006: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:35.972 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:35.972 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:35.998 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:35.999 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:37 vm08.local ceph-mon[56824]: pgmap v2007: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:37 vm00.local ceph-mon[47668]: pgmap v2007: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:38 vm08.local ceph-mon[56824]: pgmap v2008: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:39.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:38 vm00.local ceph-mon[47668]: pgmap v2008: 97 pgs: 97 
active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:41.000 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:41.000 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:41.032 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:41.032 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:41 vm08.local ceph-mon[56824]: pgmap v2009: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:41 vm00.local ceph-mon[47668]: pgmap v2009: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:43 vm00.local ceph-mon[47668]: pgmap v2010: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:43.594 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:43 vm08.local ceph-mon[56824]: pgmap v2010: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:10:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:10:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:10:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:10:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:10:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:10:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:10:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:10:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:10:44.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' 2026-03-09T00:10:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:45 vm08.local ceph-mon[56824]: pgmap v2011: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:45 vm00.local ceph-mon[47668]: pgmap v2011: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:46.034 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:46.034 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:46.065 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:46.066 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:47 vm08.local ceph-mon[56824]: pgmap v2012: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:47 vm00.local ceph-mon[47668]: pgmap v2012: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:49.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:48 vm08.local ceph-mon[56824]: pgmap v2013: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:48 vm00.local ceph-mon[47668]: pgmap v2013: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:51.067 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:51.068 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:51.094 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:51.094 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:51 vm00.local ceph-mon[47668]: pgmap v2014: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:51 vm08.local ceph-mon[56824]: pgmap v2014: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:53.833 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:53 vm00.local ceph-mon[47668]: pgmap v2015: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:53 vm08.local ceph-mon[56824]: pgmap v2015: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:55.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:55 vm08.local ceph-mon[56824]: pgmap v2016: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:55 vm00.local ceph-mon[47668]: pgmap v2016: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 
2026-03-09T00:10:56.096 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:10:56.096 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:10:56.126 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:10:56.127 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:10:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:56 vm08.local ceph-mon[56824]: pgmap v2017: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:56 vm00.local ceph-mon[47668]: pgmap v2017: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:10:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:10:58 vm00.local ceph-mon[47668]: pgmap v2018: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:10:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:10:58 vm08.local ceph-mon[56824]: pgmap v2018: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:01.129 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:01.129 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:01.157 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:01.158 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:01 vm08.local ceph-mon[56824]: pgmap v2019: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:01 vm00.local ceph-mon[47668]: pgmap v2019: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:03.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:03 vm00.local ceph-mon[47668]: pgmap v2020: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:03 vm08.local ceph-mon[56824]: pgmap v2020: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:05 vm00.local ceph-mon[47668]: pgmap v2021: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:05 vm08.local ceph-mon[56824]: pgmap v2021: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:06.159 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:06.159 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:06.261 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:06.261 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:06 vm08.local ceph-mon[56824]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:11:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:11:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:11:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:11:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:07 vm08.local ceph-mon[56824]: pgmap v2022: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:07 vm00.local ceph-mon[47668]: pgmap v2022: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:09 vm08.local ceph-mon[56824]: pgmap v2023: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:09 vm00.local ceph-mon[47668]: pgmap v2023: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:11.263 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:11.264 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:11.419 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:11.420 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:11 vm00.local ceph-mon[47668]: pgmap v2024: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:11 vm08.local ceph-mon[56824]: pgmap v2024: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:13.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:13 vm00.local ceph-mon[47668]: pgmap v2025: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:13 vm08.local ceph-mon[56824]: pgmap v2025: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:15 vm08.local ceph-mon[56824]: pgmap v2026: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:15.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:15 vm00.local ceph-mon[47668]: pgmap v2026: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:16.421 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:16.422 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:16.447 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:16.447 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:17 vm08.local ceph-mon[56824]: pgmap v2027: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:17 vm00.local ceph-mon[47668]: pgmap v2027: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:19.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:18 vm00.local ceph-mon[47668]: pgmap v2028: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:18 vm08.local ceph-mon[56824]: pgmap v2028: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:21.449 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:21.449 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:21.474 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:21.474 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:21 vm08.local ceph-mon[56824]: pgmap v2029: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:21 vm00.local ceph-mon[47668]: pgmap v2029: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:23 vm00.local ceph-mon[47668]: pgmap v2030: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:23 vm08.local ceph-mon[56824]: pgmap v2030: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:25 vm08.local ceph-mon[56824]: pgmap v2031: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:25 vm00.local ceph-mon[47668]: pgmap v2031: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:26.476 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:26.476 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:26.503 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-09T00:11:26.503 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:27 vm08.local ceph-mon[56824]: pgmap v2032: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:27 vm00.local ceph-mon[47668]: pgmap v2032: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:29.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:28 vm00.local ceph-mon[47668]: pgmap v2033: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:28 vm08.local ceph-mon[56824]: pgmap v2033: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:31.504 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:31.505 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:31.640 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:31.640 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:31 vm00.local ceph-mon[47668]: pgmap v2034: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:31 vm08.local ceph-mon[56824]: pgmap v2034: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:33.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:33 vm00.local ceph-mon[47668]: pgmap v2035: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:33 vm08.local ceph-mon[56824]: pgmap v2035: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:35 vm00.local ceph-mon[47668]: pgmap v2036: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:35.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:35 vm08.local ceph-mon[56824]: pgmap v2036: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:36.642 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:36.642 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:36.668 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:36.669 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:37 vm00.local ceph-mon[47668]: pgmap v2037: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:37 vm08.local ceph-mon[56824]: pgmap v2037: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:38 vm00.local ceph-mon[47668]: pgmap v2038: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:38 vm08.local ceph-mon[56824]: pgmap v2038: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:41 vm08.local ceph-mon[56824]: pgmap v2039: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:41.670 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:41.671 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:41 vm00.local ceph-mon[47668]: pgmap v2039: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:41.696 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:41.697 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:43 vm00.local ceph-mon[47668]: pgmap v2040: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:43 vm08.local ceph-mon[56824]: pgmap v2040: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:11:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:11:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:11:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:11:44.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:11:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:11:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:11:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": 
"auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:11:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:11:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:11:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:45 vm08.local ceph-mon[56824]: pgmap v2041: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:45 vm00.local ceph-mon[47668]: pgmap v2041: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:46.698 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:46.699 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:46.726 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:46.726 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:47 vm08.local ceph-mon[56824]: pgmap v2042: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:47 vm00.local ceph-mon[47668]: pgmap v2042: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:48 vm00.local ceph-mon[47668]: pgmap v2043: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:48 vm08.local ceph-mon[56824]: pgmap v2043: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:51 vm08.local ceph-mon[56824]: pgmap v2044: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:51 vm00.local ceph-mon[47668]: pgmap v2044: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:51.727 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:51.727 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:51.752 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:51.753 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:53.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:53 vm00.local ceph-mon[47668]: pgmap v2045: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:53 vm08.local ceph-mon[56824]: pgmap v2045: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:55 vm08.local 
ceph-mon[56824]: pgmap v2046: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:55 vm00.local ceph-mon[47668]: pgmap v2046: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:56.754 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:11:56.755 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:11:56.781 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:11:56.781 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:11:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:57 vm08.local ceph-mon[56824]: pgmap v2047: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:57 vm00.local ceph-mon[47668]: pgmap v2047: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:11:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:11:58 vm00.local ceph-mon[47668]: pgmap v2048: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:11:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:11:58 vm08.local ceph-mon[56824]: pgmap v2048: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:01 vm08.local ceph-mon[56824]: pgmap v2049: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:01 vm00.local ceph-mon[47668]: pgmap v2049: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:01.783 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:01.783 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:01.810 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:01.811 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:03 vm00.local ceph-mon[47668]: pgmap v2050: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:03 vm08.local ceph-mon[56824]: pgmap v2050: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:05 vm08.local ceph-mon[56824]: pgmap v2051: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:05 vm00.local ceph-mon[47668]: pgmap v2051: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:06 vm08.local ceph-mon[56824]: from='mgr.14236 
192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:12:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:12:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:12:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:12:06.812 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:06.813 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:06.839 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:06.839 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:07 vm08.local ceph-mon[56824]: pgmap v2052: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:07 vm00.local ceph-mon[47668]: pgmap v2052: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:08 vm08.local ceph-mon[56824]: pgmap v2053: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:08 vm00.local ceph-mon[47668]: pgmap v2053: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:11.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:11 vm08.local ceph-mon[56824]: pgmap v2054: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:11 vm00.local ceph-mon[47668]: pgmap v2054: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:11.841 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:11.841 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:11.867 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:11.868 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:13 vm08.local ceph-mon[56824]: pgmap v2055: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:13.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:13 vm00.local ceph-mon[47668]: pgmap v2055: 97 pgs: 97 active+clean; 453 KiB 
data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:15 vm08.local ceph-mon[56824]: pgmap v2056: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:15 vm00.local ceph-mon[47668]: pgmap v2056: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:16.869 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:16.870 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:17.088 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:17.088 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:17 vm00.local ceph-mon[47668]: pgmap v2057: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:17 vm08.local ceph-mon[56824]: pgmap v2057: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:19.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:19 vm00.local ceph-mon[47668]: pgmap v2058: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:19 vm08.local ceph-mon[56824]: pgmap v2058: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:21.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:20 vm00.local ceph-mon[47668]: pgmap v2059: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:21.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:20 vm08.local ceph-mon[56824]: pgmap v2059: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:22.090 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:22.090 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:22.174 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:22.175 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:23 vm00.local ceph-mon[47668]: pgmap v2060: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:23.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:23 vm08.local ceph-mon[56824]: pgmap v2060: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:25 vm08.local ceph-mon[56824]: pgmap v2061: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:25 vm00.local ceph-mon[47668]: pgmap v2061: 97 pgs: 97 active+clean; 453 KiB data, 73 MiB used, 160 GiB / 160 GiB 
avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:27.176 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:27.177 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:27.203 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:27.203 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:27 vm08.local ceph-mon[56824]: pgmap v2062: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:27 vm00.local ceph-mon[47668]: pgmap v2062: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:28 vm08.local ceph-mon[56824]: pgmap v2063: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:28 vm00.local ceph-mon[47668]: pgmap v2063: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:31 vm00.local ceph-mon[47668]: pgmap v2064: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:31 vm08.local ceph-mon[56824]: pgmap v2064: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:32.204 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:32.222 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:32.360 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:32.361 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:33.840 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:33 vm00.local ceph-mon[47668]: pgmap v2065: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:33 vm08.local ceph-mon[56824]: pgmap v2065: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:35.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:35 vm08.local ceph-mon[56824]: pgmap v2066: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:35 vm00.local ceph-mon[47668]: pgmap v2066: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:37.362 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:37.363 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:37.388 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:37.389 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:37 vm08.local 
ceph-mon[56824]: pgmap v2067: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:37 vm00.local ceph-mon[47668]: pgmap v2067: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:38 vm08.local ceph-mon[56824]: pgmap v2068: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:38 vm00.local ceph-mon[47668]: pgmap v2068: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:41 vm08.local ceph-mon[56824]: pgmap v2069: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:41 vm00.local ceph-mon[47668]: pgmap v2069: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:42.390 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:42.391 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:42.418 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:42.418 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:43 vm08.local ceph-mon[56824]: pgmap v2070: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:43.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:43 vm00.local ceph-mon[47668]: pgmap v2070: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:44.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:12:44.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:44.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:12:44.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:12:44.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:12:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:12:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:44 vm00.local 
ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:12:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:12:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:12:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:45 vm00.local ceph-mon[47668]: pgmap v2071: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:45 vm08.local ceph-mon[56824]: pgmap v2071: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:47.419 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:47.420 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:47.446 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:47.446 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:47 vm00.local ceph-mon[47668]: pgmap v2072: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:47 vm08.local ceph-mon[56824]: pgmap v2072: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:49 vm08.local ceph-mon[56824]: pgmap v2073: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:49 vm00.local ceph-mon[47668]: pgmap v2073: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:51 vm08.local ceph-mon[56824]: pgmap v2074: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:51 vm00.local ceph-mon[47668]: pgmap v2074: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:52.447 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:52.448 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:52.474 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:52.475 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:53 vm00.local ceph-mon[47668]: pgmap v2075: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 
255 B/s wr, 0 op/s 2026-03-09T00:12:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:53 vm08.local ceph-mon[56824]: pgmap v2075: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:55 vm00.local ceph-mon[47668]: pgmap v2076: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:55 vm08.local ceph-mon[56824]: pgmap v2076: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:57.476 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:12:57.476 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:12:57.503 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:12:57.503 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:12:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:57 vm08.local ceph-mon[56824]: pgmap v2077: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:57 vm00.local ceph-mon[47668]: pgmap v2077: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:12:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:12:59 vm08.local ceph-mon[56824]: pgmap v2078: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:12:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:12:59 vm00.local ceph-mon[47668]: pgmap v2078: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:01.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:01 vm08.local ceph-mon[56824]: pgmap v2079: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:01 vm00.local ceph-mon[47668]: pgmap v2079: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:02.504 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:02.505 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:02.531 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:02.532 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:03.843 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:03 vm00.local ceph-mon[47668]: pgmap v2080: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:03 vm08.local ceph-mon[56824]: pgmap v2080: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:05 vm08.local ceph-mon[56824]: pgmap v2081: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 
2026-03-09T00:13:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:05 vm00.local ceph-mon[47668]: pgmap v2081: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:13:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:13:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:13:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:13:07.533 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:07.534 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:07.560 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:07.560 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:07 vm08.local ceph-mon[56824]: pgmap v2082: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:07 vm00.local ceph-mon[47668]: pgmap v2082: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:09 vm08.local ceph-mon[56824]: pgmap v2083: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:09 vm00.local ceph-mon[47668]: pgmap v2083: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:11 vm08.local ceph-mon[56824]: pgmap v2084: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:11 vm00.local ceph-mon[47668]: pgmap v2084: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:12.561 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:12.562 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:12.589 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:12.589 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:13.429 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:13 vm00.local ceph-mon[47668]: pgmap v2085: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:13 vm08.local ceph-mon[56824]: pgmap v2085: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:15 vm08.local ceph-mon[56824]: pgmap v2086: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:15 vm00.local ceph-mon[47668]: pgmap v2086: 97 pgs: 97 active+clean; 453 KiB data, 77 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:17.591 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:17.591 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:17.616 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:17.617 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:17 vm08.local ceph-mon[56824]: pgmap v2087: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:17 vm00.local ceph-mon[47668]: pgmap v2087: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:19 vm08.local ceph-mon[56824]: pgmap v2088: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:19 vm00.local ceph-mon[47668]: pgmap v2088: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:21 vm08.local ceph-mon[56824]: pgmap v2089: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:21 vm00.local ceph-mon[47668]: pgmap v2089: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:22.618 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:22.619 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:22.645 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:22.646 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:23.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:23 vm00.local ceph-mon[47668]: pgmap v2090: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:23 vm08.local ceph-mon[56824]: pgmap v2090: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:25.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:25 vm08.local ceph-mon[56824]: pgmap v2091: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:25 vm00.local ceph-mon[47668]: pgmap v2091: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:27 vm08.local ceph-mon[56824]: pgmap v2092: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:27.647 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:27.647 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:27.673 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:27.674 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:27 vm00.local ceph-mon[47668]: pgmap v2092: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:29 vm08.local ceph-mon[56824]: pgmap v2093: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:29 vm00.local ceph-mon[47668]: pgmap v2093: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:31 vm08.local ceph-mon[56824]: pgmap v2094: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:31 vm00.local ceph-mon[47668]: pgmap v2094: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:32.675 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:32.675 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:32.701 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:32.701 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:33.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:33 vm00.local ceph-mon[47668]: pgmap v2095: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:33 vm08.local ceph-mon[56824]: pgmap v2095: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:35 vm08.local ceph-mon[56824]: pgmap v2096: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:35 vm00.local ceph-mon[47668]: pgmap v2096: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:37.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:37 vm08.local ceph-mon[56824]: pgmap v2097: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:37 vm00.local ceph-mon[47668]: pgmap v2097: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:37.703 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:37.703 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:37.730 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:37.731 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:39 vm08.local ceph-mon[56824]: pgmap v2098: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:39 vm00.local ceph-mon[47668]: pgmap v2098: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:41 vm08.local ceph-mon[56824]: pgmap v2099: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:41 vm00.local ceph-mon[47668]: pgmap v2099: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:42.732 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:42.732 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:42.757 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:42.757 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:43.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:43 vm00.local ceph-mon[47668]: pgmap v2100: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:43 vm08.local ceph-mon[56824]: pgmap v2100: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:44.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:13:44.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:13:44.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:44 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:13:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:13:44.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:13:44.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:44 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:13:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:45 vm08.local ceph-mon[56824]: pgmap v2101: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:13:45.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:13:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:45 vm00.local ceph-mon[47668]: pgmap v2101: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:13:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:13:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:46 vm08.local ceph-mon[56824]: pgmap v2102: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:46.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:46 vm00.local ceph-mon[47668]: pgmap v2102: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:47.759 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:47.760 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:47.787 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:47.787 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:49 vm08.local ceph-mon[56824]: pgmap v2103: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:49.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:49 vm00.local ceph-mon[47668]: pgmap v2103: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:51 vm08.local ceph-mon[56824]: pgmap v2104: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:51 vm00.local ceph-mon[47668]: pgmap v2104: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:52.789 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:52.790 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t 
nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:52.816 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:52.817 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:53 vm08.local ceph-mon[56824]: pgmap v2105: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:53 vm00.local ceph-mon[47668]: pgmap v2105: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:55 vm08.local ceph-mon[56824]: pgmap v2106: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:55 vm00.local ceph-mon[47668]: pgmap v2106: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:57.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:57 vm08.local ceph-mon[56824]: pgmap v2107: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:57 vm00.local ceph-mon[47668]: pgmap v2107: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:13:57.818 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:13:57.819 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:13:57.846 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:13:57.846 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:13:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:13:59 vm08.local ceph-mon[56824]: pgmap v2108: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:13:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:13:59 vm00.local ceph-mon[47668]: pgmap v2108: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:01 vm08.local ceph-mon[56824]: pgmap v2109: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:01 vm00.local ceph-mon[47668]: pgmap v2109: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:02.848 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:02.848 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:02.875 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:02.875 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:03 vm08.local ceph-mon[56824]: pgmap v2110: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:03.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:03 vm00.local ceph-mon[47668]: pgmap v2110: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:05 vm08.local ceph-mon[56824]: pgmap v2111: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:05 vm00.local ceph-mon[47668]: pgmap v2111: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:14:06.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:14:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:14:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:14:07.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:07 vm08.local ceph-mon[56824]: pgmap v2112: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:07 vm00.local ceph-mon[47668]: pgmap v2112: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:07.876 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:07.877 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:07.903 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:07.903 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:09 vm08.local ceph-mon[56824]: pgmap v2113: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:09 vm00.local ceph-mon[47668]: pgmap v2113: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:11 vm08.local ceph-mon[56824]: pgmap v2114: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:11 vm00.local ceph-mon[47668]: pgmap v2114: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:12.905 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:12.905 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:12.932 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:12.932 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:13.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:13 vm08.local ceph-mon[56824]: pgmap v2115: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:13.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:13 vm00.local ceph-mon[47668]: pgmap v2115: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:15 vm08.local ceph-mon[56824]: pgmap v2116: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:15 vm00.local ceph-mon[47668]: pgmap v2116: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:17 vm08.local ceph-mon[56824]: pgmap v2117: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:17 vm00.local ceph-mon[47668]: pgmap v2117: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:17.933 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:17.934 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:17.961 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:17.962 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:19 vm08.local ceph-mon[56824]: pgmap v2118: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:19 vm00.local ceph-mon[47668]: pgmap v2118: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:21 vm08.local ceph-mon[56824]: pgmap v2119: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:21 vm00.local ceph-mon[47668]: pgmap v2119: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:22.963 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:22.964 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:22.990 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:22.990 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:23 vm08.local 
ceph-mon[56824]: pgmap v2120: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:23.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:23 vm00.local ceph-mon[47668]: pgmap v2120: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:25.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:25 vm08.local ceph-mon[56824]: pgmap v2121: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:25 vm00.local ceph-mon[47668]: pgmap v2121: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:27.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:27 vm08.local ceph-mon[56824]: pgmap v2122: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:27 vm00.local ceph-mon[47668]: pgmap v2122: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:27.991 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:27.992 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:28.018 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:28.019 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:29 vm08.local ceph-mon[56824]: pgmap v2123: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:29 vm00.local ceph-mon[47668]: pgmap v2123: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:31.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:31 vm08.local ceph-mon[56824]: pgmap v2124: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:31 vm00.local ceph-mon[47668]: pgmap v2124: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:33.020 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:33.021 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:33.050 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:33.050 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:33.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:33 vm08.local ceph-mon[56824]: pgmap v2125: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:33.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:33 vm00.local ceph-mon[47668]: pgmap v2125: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:35.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:35 vm08.local ceph-mon[56824]: pgmap v2126: 97 pgs: 97 
active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:35 vm00.local ceph-mon[47668]: pgmap v2126: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:37.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:37 vm08.local ceph-mon[56824]: pgmap v2127: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:37 vm00.local ceph-mon[47668]: pgmap v2127: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:38.052 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:38.052 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:38.080 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:38.080 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:39 vm08.local ceph-mon[56824]: pgmap v2128: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:39 vm00.local ceph-mon[47668]: pgmap v2128: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:41 vm08.local ceph-mon[56824]: pgmap v2129: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:41 vm00.local ceph-mon[47668]: pgmap v2129: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:43.082 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:43.082 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:43.112 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:43.113 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:43 vm08.local ceph-mon[56824]: pgmap v2130: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:43.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:43 vm00.local ceph-mon[47668]: pgmap v2130: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:45 vm08.local ceph-mon[56824]: pgmap v2131: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:14:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:14:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:14:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:14:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:14:45.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:14:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:45 vm00.local ceph-mon[47668]: pgmap v2131: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:14:45.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:14:45.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:14:45.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:14:45.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:14:45.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:14:47.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:47 vm08.local ceph-mon[56824]: pgmap v2132: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:47 vm00.local ceph-mon[47668]: pgmap v2132: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:48.114 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:48.114 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:48.139 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:48.140 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:49 vm08.local ceph-mon[56824]: pgmap v2133: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:49.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:49 vm00.local ceph-mon[47668]: pgmap v2133: 97 
pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:51 vm08.local ceph-mon[56824]: pgmap v2134: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:51.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:51 vm00.local ceph-mon[47668]: pgmap v2134: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:53.141 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:53.142 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:53.167 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:53.167 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:53 vm08.local ceph-mon[56824]: pgmap v2135: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:53 vm00.local ceph-mon[47668]: pgmap v2135: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:55.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:55 vm08.local ceph-mon[56824]: pgmap v2136: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:55 vm00.local ceph-mon[47668]: pgmap v2136: 97 pgs: 97 active+clean; 453 KiB data, 81 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:57 vm00.local ceph-mon[47668]: pgmap v2137: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:57 vm08.local ceph-mon[56824]: pgmap v2137: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:14:58.169 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:14:58.169 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:14:58.196 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:14:58.196 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:14:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:14:59 vm08.local ceph-mon[56824]: pgmap v2138: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:14:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:14:59 vm00.local ceph-mon[47668]: pgmap v2138: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:01 vm08.local ceph-mon[56824]: pgmap v2139: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:01 vm00.local ceph-mon[47668]: pgmap v2139: 97 pgs: 97 active+clean; 453 KiB data, 82 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:03.198 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:03.198 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:03.224 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:03.225 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:03 vm08.local ceph-mon[56824]: pgmap v2140: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:03.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:03 vm00.local ceph-mon[47668]: pgmap v2140: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:05 vm08.local ceph-mon[56824]: pgmap v2141: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:05 vm00.local ceph-mon[47668]: pgmap v2141: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:15:06.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:15:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:15:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:15:07.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:07 vm08.local ceph-mon[56824]: pgmap v2142: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:07 vm00.local ceph-mon[47668]: pgmap v2142: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:08.226 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:08.226 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:08.252 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:08.253 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:09 vm08.local ceph-mon[56824]: pgmap v2143: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:09 vm00.local ceph-mon[47668]: pgmap v2143: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:11 vm08.local ceph-mon[56824]: pgmap v2144: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:11 vm00.local ceph-mon[47668]: pgmap v2144: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:13.254 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:13.254 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:13.280 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:13.281 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:13.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:13 vm08.local ceph-mon[56824]: pgmap v2145: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:13.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:13 vm00.local ceph-mon[47668]: pgmap v2145: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:15.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:15 vm08.local ceph-mon[56824]: pgmap v2146: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:15 vm00.local ceph-mon[47668]: pgmap v2146: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:17.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:17 vm08.local ceph-mon[56824]: pgmap v2147: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:17 vm00.local ceph-mon[47668]: pgmap v2147: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:18.282 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:18.283 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:18.309 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:18.310 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:19 vm08.local ceph-mon[56824]: pgmap v2148: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:19 vm00.local ceph-mon[47668]: pgmap v2148: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:21 vm08.local ceph-mon[56824]: pgmap v2149: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-09T00:15:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:21 vm00.local ceph-mon[47668]: pgmap v2149: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:23.311 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:23.312 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:23.338 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:23.338 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:23.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:23 vm08.local ceph-mon[56824]: pgmap v2150: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:23.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:23 vm00.local ceph-mon[47668]: pgmap v2150: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:25.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:25 vm08.local ceph-mon[56824]: pgmap v2151: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:25.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:25 vm00.local ceph-mon[47668]: pgmap v2151: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:27 vm08.local ceph-mon[56824]: pgmap v2152: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:27 vm00.local ceph-mon[47668]: pgmap v2152: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:28.340 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:28.340 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:28.366 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:28.367 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:29 vm08.local ceph-mon[56824]: pgmap v2153: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:29 vm00.local ceph-mon[47668]: pgmap v2153: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:31 vm08.local ceph-mon[56824]: pgmap v2154: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:31 vm00.local ceph-mon[47668]: pgmap v2154: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:33.368 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:33.369 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:33.395 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: 
mount system call failed 2026-03-09T00:15:33.395 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:33.852 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:33 vm00.local ceph-mon[47668]: pgmap v2155: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:33 vm08.local ceph-mon[56824]: pgmap v2155: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:35.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:35 vm08.local ceph-mon[56824]: pgmap v2156: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:35 vm00.local ceph-mon[47668]: pgmap v2156: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:37 vm08.local ceph-mon[56824]: pgmap v2157: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:37 vm00.local ceph-mon[47668]: pgmap v2157: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:38.397 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:38.397 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:38.429 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:38.429 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:39 vm08.local ceph-mon[56824]: pgmap v2158: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:39 vm00.local ceph-mon[47668]: pgmap v2158: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:41 vm08.local ceph-mon[56824]: pgmap v2159: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:41 vm00.local ceph-mon[47668]: pgmap v2159: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:43.430 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:43.431 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:43.459 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:43.459 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:43.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:43 vm08.local ceph-mon[56824]: pgmap v2160: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:43.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:43 vm00.local ceph-mon[47668]: pgmap v2160: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB 
used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:45.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:45 vm08.local ceph-mon[56824]: pgmap v2161: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:45.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:45.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:45.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:45.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:15:45.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:15:45.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:15:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:45 vm00.local ceph-mon[47668]: pgmap v2161: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:15:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:15:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:15:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:47 vm00.local ceph-mon[47668]: pgmap v2162: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:47 vm08.local ceph-mon[56824]: pgmap v2162: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:48.460 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:48.461 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:48.487 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:48.487 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:49.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:49 vm08.local ceph-mon[56824]: pgmap v2163: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:49 vm00.local ceph-mon[47668]: pgmap v2163: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:51.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:51 vm08.local ceph-mon[56824]: pgmap v2164: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:51 vm00.local ceph-mon[47668]: pgmap v2164: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:53.489 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:53.489 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:53.515 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:53.515 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:53.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:53 vm08.local ceph-mon[56824]: pgmap v2165: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:53 vm00.local ceph-mon[47668]: pgmap v2165: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:55.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:55 vm08.local ceph-mon[56824]: pgmap v2166: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:55 vm00.local ceph-mon[47668]: pgmap v2166: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:57.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:57 vm00.local ceph-mon[47668]: pgmap v2167: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:57 vm08.local ceph-mon[56824]: pgmap v2167: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:15:58.517 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:15:58.517 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:15:58.544 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:15:58.544 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:15:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:15:59 vm08.local ceph-mon[56824]: pgmap v2168: 97 pgs: 97 active+clean; 453 KiB 
data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:15:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:15:59 vm00.local ceph-mon[47668]: pgmap v2168: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:01.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:01 vm08.local ceph-mon[56824]: pgmap v2169: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:01 vm00.local ceph-mon[47668]: pgmap v2169: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:03.545 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:03.546 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:03.572 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:03.573 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:03 vm08.local ceph-mon[56824]: pgmap v2170: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:03.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:03 vm00.local ceph-mon[47668]: pgmap v2170: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:05 vm08.local ceph-mon[56824]: pgmap v2171: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:05 vm00.local ceph-mon[47668]: pgmap v2171: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:16:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:16:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:16:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:16:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:07 vm00.local ceph-mon[47668]: pgmap v2172: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:07 vm08.local 
ceph-mon[56824]: pgmap v2172: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:08.574 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:08.575 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:08.601 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:08.602 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:09 vm08.local ceph-mon[56824]: pgmap v2173: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:09 vm00.local ceph-mon[47668]: pgmap v2173: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:11.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:11 vm08.local ceph-mon[56824]: pgmap v2174: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:11 vm00.local ceph-mon[47668]: pgmap v2174: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:13.603 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:13.604 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:13.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:13 vm08.local ceph-mon[56824]: pgmap v2175: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:13.637 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:13.637 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:13.658 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:13 vm00.local ceph-mon[47668]: pgmap v2175: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:15 vm00.local ceph-mon[47668]: pgmap v2176: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:15 vm08.local ceph-mon[56824]: pgmap v2176: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:17 vm00.local ceph-mon[47668]: pgmap v2177: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:17.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:17 vm08.local ceph-mon[56824]: pgmap v2177: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:18.639 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:18.639 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:18.668 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:18.668 INFO:teuthology.orchestra.run.vm00.stderr:+ 
sleep 5 2026-03-09T00:16:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:19 vm08.local ceph-mon[56824]: pgmap v2178: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:19 vm00.local ceph-mon[47668]: pgmap v2178: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:21.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:21 vm08.local ceph-mon[56824]: pgmap v2179: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:21 vm00.local ceph-mon[47668]: pgmap v2179: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:23.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:23 vm08.local ceph-mon[56824]: pgmap v2180: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:23.669 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:23.670 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:23.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:23 vm00.local ceph-mon[47668]: pgmap v2180: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:23.696 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:23.696 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:25 vm00.local ceph-mon[47668]: pgmap v2181: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:25.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:25 vm08.local ceph-mon[56824]: pgmap v2181: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:27 vm00.local ceph-mon[47668]: pgmap v2182: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:27 vm08.local ceph-mon[56824]: pgmap v2182: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:28.698 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:28.698 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:28.725 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:28.726 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:29.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:29 vm08.local ceph-mon[56824]: pgmap v2183: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:29 vm00.local ceph-mon[47668]: pgmap v2183: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:31.627 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:31 vm08.local ceph-mon[56824]: pgmap v2184: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:31 vm00.local ceph-mon[47668]: pgmap v2184: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:33.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:33 vm00.local ceph-mon[47668]: pgmap v2185: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:33.727 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:33.727 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:33.753 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:33.753 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:33 vm08.local ceph-mon[56824]: pgmap v2185: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:35.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:35 vm00.local ceph-mon[47668]: pgmap v2186: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:35.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:35 vm08.local ceph-mon[56824]: pgmap v2186: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:37.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:37 vm00.local ceph-mon[47668]: pgmap v2187: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:37 vm08.local ceph-mon[56824]: pgmap v2187: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:38.755 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:38.756 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:38.783 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:38.784 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:39.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:39 vm08.local ceph-mon[56824]: pgmap v2188: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:39.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:39 vm00.local ceph-mon[47668]: pgmap v2188: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:41.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:41 vm08.local ceph-mon[56824]: pgmap v2189: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:41.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:41 vm00.local ceph-mon[47668]: pgmap v2189: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:43.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:43 vm00.local ceph-mon[47668]: pgmap v2190: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:43.785 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:43.786 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:43.812 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:43.813 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:43 vm08.local ceph-mon[56824]: pgmap v2190: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:45 vm00.local ceph-mon[47668]: pgmap v2191: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:45.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:45 vm08.local ceph-mon[56824]: pgmap v2191: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:45.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:45.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:45.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:46.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:46 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:16:46.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:46 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:16:46.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:46 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:16:46.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:46 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": 
"config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:16:46.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:46 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:16:46.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:46 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:16:47.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:47 vm00.local ceph-mon[47668]: pgmap v2192: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:47.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:47 vm08.local ceph-mon[56824]: pgmap v2192: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:48.814 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:48.815 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:48.843 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:48.843 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:49.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:49 vm08.local ceph-mon[56824]: pgmap v2193: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:49.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:49 vm00.local ceph-mon[47668]: pgmap v2193: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:51.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:51 vm00.local ceph-mon[47668]: pgmap v2194: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:51.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:51 vm08.local ceph-mon[56824]: pgmap v2194: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:53 vm00.local ceph-mon[47668]: pgmap v2195: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:53.844 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:53.845 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:53 vm08.local ceph-mon[56824]: pgmap v2195: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:53.885 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:53.885 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:55.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:55 vm00.local ceph-mon[47668]: pgmap v2196: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:55 vm08.local ceph-mon[56824]: pgmap v2196: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:57.679 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:57 vm00.local ceph-mon[47668]: pgmap v2197: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:57 vm08.local ceph-mon[56824]: pgmap v2197: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:16:58.886 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:16:58.887 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:16:58.913 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:16:58.913 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:16:59.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:16:59 vm08.local ceph-mon[56824]: pgmap v2198: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:16:59.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:16:59 vm00.local ceph-mon[47668]: pgmap v2198: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:01.649 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:01 vm08.local ceph-mon[56824]: pgmap v2199: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:01.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:01 vm00.local ceph-mon[47668]: pgmap v2199: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:03.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:03 vm00.local ceph-mon[47668]: pgmap v2200: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:03 vm08.local ceph-mon[56824]: pgmap v2200: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:03.915 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:03.915 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:03.941 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:03.942 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:05 vm00.local ceph-mon[47668]: pgmap v2201: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:05 vm08.local ceph-mon[56824]: pgmap v2201: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:17:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:17:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:17:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:17:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:07 vm00.local ceph-mon[47668]: pgmap v2202: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:07 vm08.local ceph-mon[56824]: pgmap v2202: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:08.943 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:08.943 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:08.969 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:08.969 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:09.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:09 vm08.local ceph-mon[56824]: pgmap v2203: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:09 vm00.local ceph-mon[47668]: pgmap v2203: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:11 vm00.local ceph-mon[47668]: pgmap v2204: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:11 vm08.local ceph-mon[56824]: pgmap v2204: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:13.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:13 vm00.local ceph-mon[47668]: pgmap v2205: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:13 vm08.local ceph-mon[56824]: pgmap v2205: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:13.971 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:13.971 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:13.998 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:13.998 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:15 vm00.local ceph-mon[47668]: pgmap v2206: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:15.877 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:15 vm08.local ceph-mon[56824]: pgmap v2206: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:17 vm00.local ceph-mon[47668]: pgmap v2207: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:17 vm08.local ceph-mon[56824]: pgmap v2207: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:19.000 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:19.000 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:19.026 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:19.027 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:19.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:19 vm08.local ceph-mon[56824]: pgmap v2208: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:19 vm00.local ceph-mon[47668]: pgmap v2208: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:21.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:21 vm00.local ceph-mon[47668]: pgmap v2209: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:21 vm08.local ceph-mon[56824]: pgmap v2209: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:23.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:23 vm00.local ceph-mon[47668]: pgmap v2210: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:23 vm08.local ceph-mon[56824]: pgmap v2210: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:24.028 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:24.028 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:24.054 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:24.055 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:25.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:25 vm00.local ceph-mon[47668]: pgmap v2211: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:25.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:25 vm08.local ceph-mon[56824]: pgmap v2211: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:27.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:27 vm00.local ceph-mon[47668]: pgmap v2212: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:27.877 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:27 vm08.local ceph-mon[56824]: pgmap v2212: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:29.056 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:29.057 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:29.084 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:29.085 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:29.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:29 vm08.local ceph-mon[56824]: pgmap v2213: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:29.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:29 vm00.local ceph-mon[47668]: pgmap v2213: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:31.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:31 vm00.local ceph-mon[47668]: pgmap v2214: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:31 vm08.local ceph-mon[56824]: pgmap v2214: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:33.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:33 vm00.local ceph-mon[47668]: pgmap v2215: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:33 vm08.local ceph-mon[56824]: pgmap v2215: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:34.086 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:34.087 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:34.172 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:34.173 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:35.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:35 vm08.local ceph-mon[56824]: pgmap v2216: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:35 vm00.local ceph-mon[47668]: pgmap v2216: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:38.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:37 vm08.local ceph-mon[56824]: pgmap v2217: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:38.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:37 vm00.local ceph-mon[47668]: pgmap v2217: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:39.174 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:39.174 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:39.294 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-09T00:17:39.296 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:39.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:39 vm08.local ceph-mon[56824]: pgmap v2218: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:39.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:39 vm00.local ceph-mon[47668]: pgmap v2218: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:41.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:41 vm08.local ceph-mon[56824]: pgmap v2219: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:41.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:41 vm00.local ceph-mon[47668]: pgmap v2219: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:43.863 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:43 vm00.local ceph-mon[47668]: pgmap v2220: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:43.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:43 vm08.local ceph-mon[56824]: pgmap v2220: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:44.297 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:44.297 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:44.420 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:44.420 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:44 vm08.local ceph-mon[56824]: pgmap v2221: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:44 vm00.local ceph-mon[47668]: pgmap v2221: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:45.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:45.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:45.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:45 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:46.019 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:46.019 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:46.019 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:45 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:47.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:46 vm00.local ceph-mon[47668]: pgmap v2222: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:47.193 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:46 vm08.local ceph-mon[56824]: pgmap v2222: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:48.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:48 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:17:48.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:48 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:17:48.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:48 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:17:48.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:48 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:17:48.944 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:48 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:17:48.944 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:48 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:17:49.422 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:49.423 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:49.587 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:49.588 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:49.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:49 vm00.local ceph-mon[47668]: pgmap v2223: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:49.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:17:49.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:17:50.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:49 vm08.local ceph-mon[56824]: pgmap v2223: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:50.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:17:50.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:17:51.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:50 vm08.local ceph-mon[56824]: pgmap v2224: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:51.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:50 vm00.local ceph-mon[47668]: pgmap v2224: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:53.865 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:53 vm00.local ceph-mon[47668]: pgmap v2225: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:53 vm08.local ceph-mon[56824]: pgmap v2225: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:54.590 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:54.591 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:54.748 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:54.748 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:55 vm08.local ceph-mon[56824]: pgmap v2226: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:55 vm00.local ceph-mon[47668]: pgmap v2226: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:56 vm08.local ceph-mon[56824]: pgmap v2227: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:56 vm00.local ceph-mon[47668]: pgmap v2227: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:17:59.750 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:17:59.751 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:17:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:17:59 vm08.local ceph-mon[56824]: pgmap v2228: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:17:59.895 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:17:59.896 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:17:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:17:59 vm00.local ceph-mon[47668]: pgmap v2228: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:01.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:01 vm08.local ceph-mon[56824]: pgmap v2229: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:01 vm00.local ceph-mon[47668]: pgmap v2229: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:02.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:02 vm00.local ceph-mon[47668]: pgmap v2230: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:02 vm08.local ceph-mon[56824]: pgmap v2230: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:04.897 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:04.898 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:04.924 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:04.925 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:05 vm00.local ceph-mon[47668]: pgmap v2231: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:05 vm08.local ceph-mon[56824]: pgmap v2231: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:18:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:18:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:18:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:18:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:07 vm00.local ceph-mon[47668]: pgmap v2232: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:07 vm08.local ceph-mon[56824]: pgmap v2232: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:09.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:09 vm00.local ceph-mon[47668]: pgmap v2233: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:09 vm08.local ceph-mon[56824]: pgmap v2233: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:09.926 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:09.926 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-09T00:18:09.952 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:09.953 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:11 vm00.local ceph-mon[47668]: pgmap v2234: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:11 vm08.local ceph-mon[56824]: pgmap v2234: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:13.659 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:13 vm00.local ceph-mon[47668]: pgmap v2235: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:13 vm08.local ceph-mon[56824]: pgmap v2235: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:14.954 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:14.955 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:14.982 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:14.983 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:15 vm00.local ceph-mon[47668]: pgmap v2236: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:15 vm08.local ceph-mon[56824]: pgmap v2236: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:17.453 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:17 vm08.local ceph-mon[56824]: pgmap v2237: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:17 vm00.local ceph-mon[47668]: pgmap v2237: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:19.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:19 vm00.local ceph-mon[47668]: pgmap v2238: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:19.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:19 vm08.local ceph-mon[56824]: pgmap v2238: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:19.984 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:19.985 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:20.010 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:20.011 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:22.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:21 vm08.local ceph-mon[56824]: pgmap v2239: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:22.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:21 vm00.local 
ceph-mon[47668]: pgmap v2239: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:23.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:22 vm08.local ceph-mon[56824]: pgmap v2240: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:23.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:22 vm00.local ceph-mon[47668]: pgmap v2240: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:25.012 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:25.013 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:25.046 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:25.046 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:25.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:25 vm00.local ceph-mon[47668]: pgmap v2241: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:26.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:25 vm08.local ceph-mon[56824]: pgmap v2241: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:27.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:26 vm00.local ceph-mon[47668]: pgmap v2242: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:27.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:26 vm08.local ceph-mon[56824]: pgmap v2242: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:29.931 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:29 vm00.local ceph-mon[47668]: pgmap v2243: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:30.048 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:30.049 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:30.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:29 vm08.local ceph-mon[56824]: pgmap v2243: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:30.181 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:30.182 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:31.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:30 vm08.local ceph-mon[56824]: pgmap v2244: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:31.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:30 vm00.local ceph-mon[47668]: pgmap v2244: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:33.868 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:33 vm00.local ceph-mon[47668]: pgmap v2245: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:33 vm08.local ceph-mon[56824]: pgmap v2245: 97 pgs: 97 
active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:35.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:34 vm08.local ceph-mon[56824]: pgmap v2246: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:34 vm00.local ceph-mon[47668]: pgmap v2246: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:35.191 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:35.191 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:35.291 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:35.292 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:37 vm08.local ceph-mon[56824]: pgmap v2247: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:37.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:37 vm00.local ceph-mon[47668]: pgmap v2247: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:39.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:39 vm00.local ceph-mon[47668]: pgmap v2248: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:40.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:39 vm08.local ceph-mon[56824]: pgmap v2248: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:40.294 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:40.294 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:40.395 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:40.396 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:40 vm08.local ceph-mon[56824]: pgmap v2249: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:40 vm00.local ceph-mon[47668]: pgmap v2249: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:43.716 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:43 vm00.local ceph-mon[47668]: pgmap v2250: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:43.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:43 vm08.local ceph-mon[56824]: pgmap v2250: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:18:45.398 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:45.399 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:45.517 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:45.517 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:45.930 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:45 vm00.local ceph-mon[47668]: pgmap v2251: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:46.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:45 vm08.local ceph-mon[56824]: pgmap v2251: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:47.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:46 vm08.local ceph-mon[56824]: pgmap v2252: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:46 vm00.local ceph-mon[47668]: pgmap v2252: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:49.881 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:49 vm08.local ceph-mon[56824]: pgmap v2253: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:18:49.881 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:49.881 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:49.881 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:49.881 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:18:49.881 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:18:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:49 vm00.local ceph-mon[47668]: pgmap v2253: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:18:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:18:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:18:50.519 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:50.519 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:50.549 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:50.550 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:51 vm08.local ceph-mon[56824]: pgmap v2254: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:18:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:51 vm00.local ceph-mon[47668]: pgmap v2254: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:18:53.872 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:53 vm00.local ceph-mon[47668]: pgmap v2255: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:53 vm08.local ceph-mon[56824]: pgmap v2255: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:55.551 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:18:55.552 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:18:55.578 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:18:55.578 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:18:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:55 vm08.local ceph-mon[56824]: pgmap v2256: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:18:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:55 vm00.local ceph-mon[47668]: pgmap v2256: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:18:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:57 vm08.local ceph-mon[56824]: pgmap v2257: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:57 vm00.local ceph-mon[47668]: pgmap v2257: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:18:59 vm08.local ceph-mon[56824]: pgmap v2258: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:18:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:18:59 vm00.local ceph-mon[47668]: pgmap v2258: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:00.580 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:00.580 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:00.608 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:00.609 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:01.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:01 vm08.local ceph-mon[56824]: pgmap v2259: 97 pgs: 97 active+clean; 453 KiB 
data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:01 vm00.local ceph-mon[47668]: pgmap v2259: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:03.874 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:03 vm00.local ceph-mon[47668]: pgmap v2260: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:03 vm08.local ceph-mon[56824]: pgmap v2260: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:05.611 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:05.611 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:05.637 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:05.637 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:05 vm08.local ceph-mon[56824]: pgmap v2261: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:05 vm00.local ceph-mon[47668]: pgmap v2261: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:19:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:19:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:19:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:19:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:07 vm08.local ceph-mon[56824]: pgmap v2262: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:07 vm00.local ceph-mon[47668]: pgmap v2262: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:09.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:09 vm08.local ceph-mon[56824]: pgmap v2263: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:09 vm00.local 
ceph-mon[47668]: pgmap v2263: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:10.639 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:10.639 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:10.665 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:10.666 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:11 vm00.local ceph-mon[47668]: pgmap v2264: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:11.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:11 vm08.local ceph-mon[56824]: pgmap v2264: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:13.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:13 vm00.local ceph-mon[47668]: pgmap v2265: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:13 vm08.local ceph-mon[56824]: pgmap v2265: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:15.667 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:15.667 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:15.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:15 vm00.local ceph-mon[47668]: pgmap v2266: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:15.697 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:15.698 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:15.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:15 vm08.local ceph-mon[56824]: pgmap v2266: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:17 vm00.local ceph-mon[47668]: pgmap v2267: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:17 vm08.local ceph-mon[56824]: pgmap v2267: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:19.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:19 vm00.local ceph-mon[47668]: pgmap v2268: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:19 vm08.local ceph-mon[56824]: pgmap v2268: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:20.699 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:20.700 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:20.728 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:20.729 INFO:teuthology.orchestra.run.vm00.stderr:+ 
sleep 5 2026-03-09T00:19:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:21 vm08.local ceph-mon[56824]: pgmap v2269: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:21 vm00.local ceph-mon[47668]: pgmap v2269: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:23.875 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:23 vm00.local ceph-mon[47668]: pgmap v2270: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:23 vm08.local ceph-mon[56824]: pgmap v2270: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:25.730 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:25.731 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:25.757 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:25.758 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:25.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:25 vm08.local ceph-mon[56824]: pgmap v2271: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:25.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:25 vm00.local ceph-mon[47668]: pgmap v2271: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:27 vm08.local ceph-mon[56824]: pgmap v2272: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:27 vm00.local ceph-mon[47668]: pgmap v2272: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:29 vm08.local ceph-mon[56824]: pgmap v2273: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:29 vm00.local ceph-mon[47668]: pgmap v2273: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:30.759 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:30.760 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:30.788 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:30.789 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:31 vm08.local ceph-mon[56824]: pgmap v2274: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:31 vm00.local ceph-mon[47668]: pgmap v2274: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:33.875 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:33 vm00.local ceph-mon[47668]: pgmap v2275: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:33 vm08.local ceph-mon[56824]: pgmap v2275: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:35.791 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:35.791 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:35.825 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:35.826 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:35.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:35 vm08.local ceph-mon[56824]: pgmap v2276: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:35 vm00.local ceph-mon[47668]: pgmap v2276: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:37.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:37 vm08.local ceph-mon[56824]: pgmap v2277: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:37 vm00.local ceph-mon[47668]: pgmap v2277: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:39 vm08.local ceph-mon[56824]: pgmap v2278: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:39.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:39 vm00.local ceph-mon[47668]: pgmap v2278: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:40.827 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:40.828 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:40.854 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:40.855 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:41 vm08.local ceph-mon[56824]: pgmap v2279: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:41 vm00.local ceph-mon[47668]: pgmap v2279: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:43.876 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:43 vm00.local ceph-mon[47668]: pgmap v2280: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:43 vm08.local ceph-mon[56824]: pgmap v2280: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:45.856 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:45.857 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:45 vm08.local ceph-mon[56824]: pgmap v2281: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:45.883 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:45.884 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:45 vm00.local ceph-mon[47668]: pgmap v2281: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:47.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:47 vm08.local ceph-mon[56824]: pgmap v2282: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:47 vm00.local ceph-mon[47668]: pgmap v2282: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:49.551 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:49 vm08.local ceph-mon[56824]: pgmap v2283: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:49.551 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:49.551 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:49.551 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:49.551 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:19:49.551 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:19:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:49 vm00.local ceph-mon[47668]: pgmap v2283: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-09T00:19:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:19:49.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:19:50.885 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:50.886 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:50.912 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:50.913 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:51.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:51 vm08.local ceph-mon[56824]: pgmap v2284: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:51 vm00.local ceph-mon[47668]: pgmap v2284: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:53.877 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:53 vm00.local ceph-mon[47668]: pgmap v2285: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:53 vm08.local ceph-mon[56824]: pgmap v2285: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:55 vm08.local ceph-mon[56824]: pgmap v2286: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:55.914 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:19:55.915 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:19:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:55 vm00.local ceph-mon[47668]: pgmap v2286: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:55.942 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:19:55.943 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:19:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:57 vm08.local ceph-mon[56824]: pgmap v2287: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:57 vm00.local ceph-mon[47668]: pgmap v2287: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:19:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:19:59 vm08.local ceph-mon[56824]: pgmap v2288: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:19:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:19:59 vm00.local ceph-mon[47668]: pgmap v2288: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:00.944 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:00.945 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:01.099 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:01.100 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:00 vm08.local ceph-mon[56824]: overall HEALTH_OK 2026-03-09T00:20:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:00 vm00.local ceph-mon[47668]: overall HEALTH_OK 2026-03-09T00:20:02.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:01 vm08.local ceph-mon[56824]: pgmap v2289: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:02.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:01 vm00.local ceph-mon[47668]: pgmap v2289: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:03.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:02 vm00.local ceph-mon[47668]: pgmap v2290: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:02 vm08.local ceph-mon[56824]: pgmap v2290: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:05.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:05 vm00.local ceph-mon[47668]: pgmap v2291: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:05 vm08.local ceph-mon[56824]: pgmap v2291: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:06.101 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:06.102 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:06.127 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:06.128 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:20:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:20:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:20:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:20:07.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:07 vm00.local 
ceph-mon[47668]: pgmap v2292: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:07 vm08.local ceph-mon[56824]: pgmap v2292: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:09.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:09 vm00.local ceph-mon[47668]: pgmap v2293: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:09.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:09 vm08.local ceph-mon[56824]: pgmap v2293: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:11.130 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:11.130 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:11.157 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:11.158 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:11.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:11 vm00.local ceph-mon[47668]: pgmap v2294: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:11 vm08.local ceph-mon[56824]: pgmap v2294: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:13.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:13 vm00.local ceph-mon[47668]: pgmap v2295: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:13 vm08.local ceph-mon[56824]: pgmap v2295: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:15.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:15 vm00.local ceph-mon[47668]: pgmap v2296: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:15 vm08.local ceph-mon[56824]: pgmap v2296: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:16.159 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:16.160 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:16.186 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:16.186 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:17.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:17 vm00.local ceph-mon[47668]: pgmap v2297: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:17 vm08.local ceph-mon[56824]: pgmap v2297: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:19 vm08.local ceph-mon[56824]: pgmap v2298: 97 pgs: 97 
active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:19 vm00.local ceph-mon[47668]: pgmap v2298: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:21.188 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:21.188 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:21.215 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:21.215 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:21 vm08.local ceph-mon[56824]: pgmap v2299: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:21 vm00.local ceph-mon[47668]: pgmap v2299: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:23 vm08.local ceph-mon[56824]: pgmap v2300: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:23.880 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:23 vm00.local ceph-mon[47668]: pgmap v2300: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:25.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:25 vm08.local ceph-mon[56824]: pgmap v2301: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:25.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:25 vm00.local ceph-mon[47668]: pgmap v2301: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:26.217 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:26.217 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:26.243 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:26.244 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:27 vm08.local ceph-mon[56824]: pgmap v2302: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:27 vm00.local ceph-mon[47668]: pgmap v2302: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:29 vm08.local ceph-mon[56824]: pgmap v2303: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:29 vm00.local ceph-mon[47668]: pgmap v2303: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:31.245 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:31.246 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs 
vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:31.271 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:31.272 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:31 vm08.local ceph-mon[56824]: pgmap v2304: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:31 vm00.local ceph-mon[47668]: pgmap v2304: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:33 vm08.local ceph-mon[56824]: pgmap v2305: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:33.881 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:33 vm00.local ceph-mon[47668]: pgmap v2305: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:35.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:35 vm08.local ceph-mon[56824]: pgmap v2306: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:35 vm00.local ceph-mon[47668]: pgmap v2306: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:36.273 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:36.274 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:36.300 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:36.300 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:37 vm08.local ceph-mon[56824]: pgmap v2307: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:37 vm00.local ceph-mon[47668]: pgmap v2307: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:39.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:39 vm08.local ceph-mon[56824]: pgmap v2308: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:39.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:39 vm00.local ceph-mon[47668]: pgmap v2308: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:41.302 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:41.302 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:41.331 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:41.332 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:41 vm08.local ceph-mon[56824]: pgmap v2309: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:41.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:41 vm00.local ceph-mon[47668]: pgmap v2309: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:43 vm08.local ceph-mon[56824]: pgmap v2310: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:43.883 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:43 vm00.local ceph-mon[47668]: pgmap v2310: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:45 vm08.local ceph-mon[56824]: pgmap v2311: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:45 vm00.local ceph-mon[47668]: pgmap v2311: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:46.333 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:46.334 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:46.360 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:46.361 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:47 vm08.local ceph-mon[56824]: pgmap v2312: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:47 vm00.local ceph-mon[47668]: pgmap v2312: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:49.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:49 vm00.local ceph-mon[47668]: pgmap v2313: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:49.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:49.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:49.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:49 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:49 vm08.local ceph-mon[56824]: pgmap v2313: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:49.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:49.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:49.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:49 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:50.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:20:50.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:20:50.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:50 vm00.local ceph-mon[47668]: pgmap v2314: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:20:51.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:20:51.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:50 vm08.local ceph-mon[56824]: pgmap v2314: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:51.362 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:51.363 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:51.393 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:51.393 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:53 vm08.local ceph-mon[56824]: pgmap v2315: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:53.882 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:53 vm00.local ceph-mon[47668]: pgmap v2315: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:55 vm08.local ceph-mon[56824]: pgmap v2316: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:55 vm00.local ceph-mon[47668]: pgmap v2316: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:56.395 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:20:56.395 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:20:56.423 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:20:56.424 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:20:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:57 vm08.local ceph-mon[56824]: pgmap v2317: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:57 vm00.local ceph-mon[47668]: pgmap v2317: 97 pgs: 97 
active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:20:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:20:59 vm08.local ceph-mon[56824]: pgmap v2318: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:20:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:20:59 vm00.local ceph-mon[47668]: pgmap v2318: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:01.425 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:01.426 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:01.453 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:01.454 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:01.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:01 vm08.local ceph-mon[56824]: pgmap v2319: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:01 vm00.local ceph-mon[47668]: pgmap v2319: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:03 vm08.local ceph-mon[56824]: pgmap v2320: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:03.883 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:03 vm00.local ceph-mon[47668]: pgmap v2320: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:05 vm08.local ceph-mon[56824]: pgmap v2321: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:05 vm00.local ceph-mon[47668]: pgmap v2321: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:06.455 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:06.456 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:06.484 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:06.484 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:21:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:21:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: 
dispatch 2026-03-09T00:21:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:21:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:07 vm08.local ceph-mon[56824]: pgmap v2322: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:07 vm00.local ceph-mon[47668]: pgmap v2322: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:09.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:09 vm08.local ceph-mon[56824]: pgmap v2323: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:09 vm00.local ceph-mon[47668]: pgmap v2323: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:11.486 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:11.487 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:11.514 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:11.514 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:11 vm08.local ceph-mon[56824]: pgmap v2324: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:11.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:11 vm00.local ceph-mon[47668]: pgmap v2324: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:13 vm08.local ceph-mon[56824]: pgmap v2325: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:13.884 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:13 vm00.local ceph-mon[47668]: pgmap v2325: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:15 vm08.local ceph-mon[56824]: pgmap v2326: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:15.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:15 vm00.local ceph-mon[47668]: pgmap v2326: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:16.516 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:16.516 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:16.544 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:16.545 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:17 vm08.local ceph-mon[56824]: pgmap v2327: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 
B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:17 vm00.local ceph-mon[47668]: pgmap v2327: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:19 vm08.local ceph-mon[56824]: pgmap v2328: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:19 vm00.local ceph-mon[47668]: pgmap v2328: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:21.546 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:21.547 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:21.576 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:21.577 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:21 vm08.local ceph-mon[56824]: pgmap v2329: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:21 vm00.local ceph-mon[47668]: pgmap v2329: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:23 vm08.local ceph-mon[56824]: pgmap v2330: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:23.885 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:23 vm00.local ceph-mon[47668]: pgmap v2330: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:25.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:25 vm08.local ceph-mon[56824]: pgmap v2331: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:25.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:25 vm00.local ceph-mon[47668]: pgmap v2331: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:26.578 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:26.578 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:26.609 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:26.609 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:27 vm08.local ceph-mon[56824]: pgmap v2332: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:27 vm00.local ceph-mon[47668]: pgmap v2332: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:29 vm08.local ceph-mon[56824]: pgmap v2333: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-09T00:21:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:29 vm00.local ceph-mon[47668]: pgmap v2333: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:31.610 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:31.611 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:31.638 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:31.639 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:31 vm08.local ceph-mon[56824]: pgmap v2334: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:31 vm00.local ceph-mon[47668]: pgmap v2334: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:33 vm08.local ceph-mon[56824]: pgmap v2335: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:33.886 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:33 vm00.local ceph-mon[47668]: pgmap v2335: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:35.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:35 vm08.local ceph-mon[56824]: pgmap v2336: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:35 vm00.local ceph-mon[47668]: pgmap v2336: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:36.640 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:36.641 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:36.667 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:36.667 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:37 vm08.local ceph-mon[56824]: pgmap v2337: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:37 vm00.local ceph-mon[47668]: pgmap v2337: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:39 vm08.local ceph-mon[56824]: pgmap v2338: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:39.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:39 vm00.local ceph-mon[47668]: pgmap v2338: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:40.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:40 vm08.local ceph-mon[56824]: pgmap v2339: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:40.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:40 vm00.local ceph-mon[47668]: pgmap v2339: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:41.668 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:41.669 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:41.695 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:41.696 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:43 vm08.local ceph-mon[56824]: pgmap v2340: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:43.887 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:43 vm00.local ceph-mon[47668]: pgmap v2340: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:45 vm08.local ceph-mon[56824]: pgmap v2341: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:45 vm00.local ceph-mon[47668]: pgmap v2341: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:46.697 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:46.697 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:46.723 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:46.723 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:47 vm08.local ceph-mon[56824]: pgmap v2342: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:47 vm00.local ceph-mon[47668]: pgmap v2342: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:49 vm08.local ceph-mon[56824]: pgmap v2343: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:49.888 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:49 vm00.local ceph-mon[47668]: pgmap v2343: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:50.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:50.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:50.878 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:21:50.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:21:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:50.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:21:50.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:21:51.725 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:51.725 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:51.751 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:51.752 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:21:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:51 vm08.local ceph-mon[56824]: pgmap v2344: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:51 vm00.local ceph-mon[47668]: pgmap v2344: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:53 vm08.local ceph-mon[56824]: pgmap v2345: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:53.888 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:53 vm00.local ceph-mon[47668]: pgmap v2345: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:55 vm08.local ceph-mon[56824]: pgmap v2346: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:55 vm00.local ceph-mon[47668]: pgmap v2346: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:56.753 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:21:56.754 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:21:56.779 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:21:56.780 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 
2026-03-09T00:21:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:57 vm08.local ceph-mon[56824]: pgmap v2347: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:57 vm00.local ceph-mon[47668]: pgmap v2347: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:21:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:21:59 vm08.local ceph-mon[56824]: pgmap v2348: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:21:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:21:59 vm00.local ceph-mon[47668]: pgmap v2348: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:01.781 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:01.782 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:01.810 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:01.810 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:01.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:01 vm08.local ceph-mon[56824]: pgmap v2349: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:01 vm00.local ceph-mon[47668]: pgmap v2349: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:03 vm08.local ceph-mon[56824]: pgmap v2350: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:03.889 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:03 vm00.local ceph-mon[47668]: pgmap v2350: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:05 vm08.local ceph-mon[56824]: pgmap v2351: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:05 vm00.local ceph-mon[47668]: pgmap v2351: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:22:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:22:06.812 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:06.812 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:06.839 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call 
failed 2026-03-09T00:22:06.839 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:22:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:22:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:07 vm08.local ceph-mon[56824]: pgmap v2352: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:07 vm00.local ceph-mon[47668]: pgmap v2352: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:09.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:09 vm08.local ceph-mon[56824]: pgmap v2353: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:09 vm00.local ceph-mon[47668]: pgmap v2353: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:11.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:10 vm08.local ceph-mon[56824]: pgmap v2354: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:10 vm00.local ceph-mon[47668]: pgmap v2354: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:11.841 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:11.841 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:11.870 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:11.870 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:13.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:13 vm08.local ceph-mon[56824]: pgmap v2355: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:13.890 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:13 vm00.local ceph-mon[47668]: pgmap v2355: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:15 vm08.local ceph-mon[56824]: pgmap v2356: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:15.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:15 vm00.local ceph-mon[47668]: pgmap v2356: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:16.872 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:16.872 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-09T00:22:16.898 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:16.898 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:17 vm08.local ceph-mon[56824]: pgmap v2357: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:17 vm00.local ceph-mon[47668]: pgmap v2357: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:19 vm08.local ceph-mon[56824]: pgmap v2358: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:19 vm00.local ceph-mon[47668]: pgmap v2358: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:20.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:20 vm00.local ceph-mon[47668]: pgmap v2359: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:21.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:20 vm08.local ceph-mon[56824]: pgmap v2359: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:21.900 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:21.900 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:21.927 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:21.928 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:23 vm08.local ceph-mon[56824]: pgmap v2360: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:23.891 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:23 vm00.local ceph-mon[47668]: pgmap v2360: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:25.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:25 vm08.local ceph-mon[56824]: pgmap v2361: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:25.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:25 vm00.local ceph-mon[47668]: pgmap v2361: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:26.929 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:26.930 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:26.957 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:26.957 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:27 vm08.local ceph-mon[56824]: pgmap v2362: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:27 vm00.local 
ceph-mon[47668]: pgmap v2362: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:29 vm08.local ceph-mon[56824]: pgmap v2363: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:29 vm00.local ceph-mon[47668]: pgmap v2363: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:31 vm08.local ceph-mon[56824]: pgmap v2364: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:31 vm00.local ceph-mon[47668]: pgmap v2364: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:31.958 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:31.959 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:31.985 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:31.985 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:33 vm08.local ceph-mon[56824]: pgmap v2365: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:33.892 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:33 vm00.local ceph-mon[47668]: pgmap v2365: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:35.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:35 vm08.local ceph-mon[56824]: pgmap v2366: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:35 vm00.local ceph-mon[47668]: pgmap v2366: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:36.987 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:36.987 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:37.086 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:37.086 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:37 vm08.local ceph-mon[56824]: pgmap v2367: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:37 vm00.local ceph-mon[47668]: pgmap v2367: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:39 vm08.local ceph-mon[56824]: pgmap v2368: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:39.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:39 vm00.local ceph-mon[47668]: pgmap v2368: 97 pgs: 97 
active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:41 vm08.local ceph-mon[56824]: pgmap v2369: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:41 vm00.local ceph-mon[47668]: pgmap v2369: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:42.088 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:42.089 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:42.114 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:42.115 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:43.692 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:43 vm00.local ceph-mon[47668]: pgmap v2370: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:43 vm08.local ceph-mon[56824]: pgmap v2370: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:45 vm08.local ceph-mon[56824]: pgmap v2371: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:45 vm00.local ceph-mon[47668]: pgmap v2371: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:47.116 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:47.117 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:47.146 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:47.147 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:47 vm08.local ceph-mon[56824]: pgmap v2372: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:47 vm00.local ceph-mon[47668]: pgmap v2372: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:49 vm08.local ceph-mon[56824]: pgmap v2373: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:49.927 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:49 vm00.local ceph-mon[47668]: pgmap v2373: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:22:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' 
entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:22:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:22:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:22:50.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:22:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:22:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:22:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:22:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:22:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:22:51.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:51 vm08.local ceph-mon[56824]: pgmap v2374: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:51 vm00.local ceph-mon[47668]: pgmap v2374: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:52.148 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:52.149 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:52.174 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:52.175 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:22:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:53 vm08.local ceph-mon[56824]: pgmap v2375: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:53.894 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:53 vm00.local ceph-mon[47668]: pgmap v2375: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:55 vm08.local ceph-mon[56824]: pgmap v2376: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:55 vm00.local ceph-mon[47668]: pgmap v2376: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:22:56.877 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:56 vm08.local ceph-mon[56824]: pgmap v2377: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:56.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:56 vm00.local ceph-mon[47668]: pgmap v2377: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:22:57.176 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:22:57.176 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:22:57.202 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:22:57.203 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:00.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:22:59 vm00.local ceph-mon[47668]: pgmap v2378: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:00.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:22:59 vm08.local ceph-mon[56824]: pgmap v2378: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:01.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:01 vm08.local ceph-mon[56824]: pgmap v2379: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:01.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:01 vm00.local ceph-mon[47668]: pgmap v2379: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:02.204 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:02.205 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:02.233 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:02.234 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:03 vm08.local ceph-mon[56824]: pgmap v2380: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:03.895 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:03 vm00.local ceph-mon[47668]: pgmap v2380: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:05 vm08.local ceph-mon[56824]: pgmap v2381: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:05 vm00.local ceph-mon[47668]: pgmap v2381: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:23:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:23:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:23:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:23:07.235 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:07.236 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:07.263 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:07.263 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:08.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:07 vm08.local ceph-mon[56824]: pgmap v2382: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:08.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:07 vm00.local ceph-mon[47668]: pgmap v2382: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:09 vm08.local ceph-mon[56824]: pgmap v2383: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:09 vm00.local ceph-mon[47668]: pgmap v2383: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:11.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:10 vm08.local ceph-mon[56824]: pgmap v2384: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:10 vm00.local ceph-mon[47668]: pgmap v2384: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:12.265 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:12.265 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:12.291 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:12.292 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:13.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:12 vm08.local ceph-mon[56824]: pgmap v2385: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:13.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:12 vm00.local ceph-mon[47668]: pgmap v2385: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:15.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:15 vm08.local ceph-mon[56824]: pgmap v2386: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:15.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:15 vm00.local ceph-mon[47668]: pgmap v2386: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:17.293 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:17.293 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:17.319 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:17.319 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:17 vm08.local ceph-mon[56824]: pgmap v2387: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:17 vm00.local ceph-mon[47668]: pgmap v2387: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:19.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:19 vm08.local ceph-mon[56824]: pgmap v2388: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:19 vm00.local ceph-mon[47668]: pgmap v2388: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:21.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:21 vm08.local ceph-mon[56824]: pgmap v2389: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:21 vm00.local ceph-mon[47668]: pgmap v2389: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:22.321 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:22.321 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:22.346 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:22.347 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:23.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:23 vm08.local ceph-mon[56824]: pgmap v2390: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:23.897 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:23 vm00.local ceph-mon[47668]: pgmap v2390: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:25.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:25 vm08.local ceph-mon[56824]: pgmap v2391: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:25.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:25 vm00.local ceph-mon[47668]: pgmap v2391: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:27.348 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:27.348 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:27.374 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-09T00:23:27.374 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:27 vm08.local ceph-mon[56824]: pgmap v2392: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:27 vm00.local ceph-mon[47668]: pgmap v2392: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:29.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:29 vm08.local ceph-mon[56824]: pgmap v2393: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:29 vm00.local ceph-mon[47668]: pgmap v2393: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:31 vm08.local ceph-mon[56824]: pgmap v2394: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:31 vm00.local ceph-mon[47668]: pgmap v2394: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:32.376 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:32.376 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:32.403 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:32.404 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:33 vm08.local ceph-mon[56824]: pgmap v2395: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:33.898 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:33 vm00.local ceph-mon[47668]: pgmap v2395: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:34.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:34 vm00.local ceph-mon[47668]: pgmap v2396: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:34 vm08.local ceph-mon[56824]: pgmap v2396: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:37.405 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:37.406 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:37.433 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:37.434 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:37 vm08.local ceph-mon[56824]: pgmap v2397: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:37 vm00.local ceph-mon[47668]: pgmap v2397: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:39 vm08.local ceph-mon[56824]: pgmap v2398: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:39.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:39 vm00.local ceph-mon[47668]: pgmap v2398: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:41 vm08.local ceph-mon[56824]: pgmap v2399: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:41 vm00.local ceph-mon[47668]: pgmap v2399: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:42.436 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:42.436 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:42.464 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:42.465 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:43 vm08.local ceph-mon[56824]: pgmap v2400: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:43.899 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:43 vm00.local ceph-mon[47668]: pgmap v2400: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:45 vm08.local ceph-mon[56824]: pgmap v2401: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:45.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:45 vm00.local ceph-mon[47668]: pgmap v2401: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:47.467 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:47.467 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:47.495 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:47.496 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:47.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:47 vm08.local ceph-mon[56824]: pgmap v2402: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:47 vm00.local ceph-mon[47668]: pgmap v2402: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:49 vm08.local ceph-mon[56824]: pgmap v2403: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:49 vm00.local ceph-mon[47668]: pgmap v2403: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-09T00:23:50.605 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:50.606 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:50.606 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:23:50.606 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:23:50.606 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:23:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:23:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:23:50.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:23:51.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:51 vm08.local ceph-mon[56824]: pgmap v2404: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:51 vm00.local ceph-mon[47668]: pgmap v2404: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:52.497 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:52.497 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:52.526 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:52.526 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:53 vm08.local ceph-mon[56824]: pgmap v2405: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:53.900 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:53 vm00.local ceph-mon[47668]: pgmap v2405: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:55 
vm08.local ceph-mon[56824]: pgmap v2406: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:55 vm00.local ceph-mon[47668]: pgmap v2406: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:23:57.528 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:23:57.528 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:23:57.553 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:23:57.554 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:23:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:57 vm08.local ceph-mon[56824]: pgmap v2407: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:57 vm00.local ceph-mon[47668]: pgmap v2407: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:23:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:23:59 vm00.local ceph-mon[47668]: pgmap v2408: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:23:59 vm08.local ceph-mon[56824]: pgmap v2408: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:00 vm08.local ceph-mon[56824]: pgmap v2409: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:00 vm00.local ceph-mon[47668]: pgmap v2409: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:02.555 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:02.556 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:02.581 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:02.582 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:03 vm08.local ceph-mon[56824]: pgmap v2410: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:03.901 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:03 vm00.local ceph-mon[47668]: pgmap v2410: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:05.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:05 vm08.local ceph-mon[56824]: pgmap v2411: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:05 vm00.local ceph-mon[47668]: pgmap v2411: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:06 vm08.local ceph-mon[56824]: 
from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:24:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:24:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:24:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:24:07.583 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:07.584 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:07.609 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:07.609 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:07.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:07 vm08.local ceph-mon[56824]: pgmap v2412: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:07 vm00.local ceph-mon[47668]: pgmap v2412: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:09.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:09 vm08.local ceph-mon[56824]: pgmap v2413: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:09 vm00.local ceph-mon[47668]: pgmap v2413: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:11 vm08.local ceph-mon[56824]: pgmap v2414: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:11.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:11 vm00.local ceph-mon[47668]: pgmap v2414: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:12.611 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:12.611 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:12.638 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:12.639 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:13 vm08.local ceph-mon[56824]: pgmap v2415: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:13.902 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:13 vm00.local ceph-mon[47668]: pgmap v2415: 97 pgs: 97 
active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:15.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:15 vm08.local ceph-mon[56824]: pgmap v2416: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:15.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:15 vm00.local ceph-mon[47668]: pgmap v2416: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:17.640 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:17.640 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:17.666 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:17.667 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:17 vm08.local ceph-mon[56824]: pgmap v2417: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:17 vm00.local ceph-mon[47668]: pgmap v2417: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:19 vm08.local ceph-mon[56824]: pgmap v2418: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:19 vm00.local ceph-mon[47668]: pgmap v2418: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:21 vm00.local ceph-mon[47668]: pgmap v2419: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:22.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:21 vm08.local ceph-mon[56824]: pgmap v2419: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:22.668 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:22.668 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:22.694 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:22.695 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:23.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:22 vm00.local ceph-mon[47668]: pgmap v2420: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:23.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:22 vm08.local ceph-mon[56824]: pgmap v2420: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:25.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:25 vm08.local ceph-mon[56824]: pgmap v2421: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:25.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:25 vm00.local ceph-mon[47668]: pgmap v2421: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 
160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:27.696 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:27.697 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:27.723 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:27.723 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:27 vm08.local ceph-mon[56824]: pgmap v2422: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:27 vm00.local ceph-mon[47668]: pgmap v2422: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:29 vm08.local ceph-mon[56824]: pgmap v2423: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:29 vm00.local ceph-mon[47668]: pgmap v2423: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:31 vm08.local ceph-mon[56824]: pgmap v2424: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:31 vm00.local ceph-mon[47668]: pgmap v2424: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:32.725 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:32.726 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:32.752 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:32.753 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:33 vm08.local ceph-mon[56824]: pgmap v2425: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:33.902 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:33 vm00.local ceph-mon[47668]: pgmap v2425: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:35 vm00.local ceph-mon[47668]: pgmap v2426: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:36.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:35 vm08.local ceph-mon[56824]: pgmap v2426: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:37.754 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:37.755 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:37.780 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:37.781 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 
00:24:37 vm00.local ceph-mon[47668]: pgmap v2427: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:38.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:37 vm08.local ceph-mon[56824]: pgmap v2427: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:40.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:39 vm08.local ceph-mon[56824]: pgmap v2428: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:40.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:39 vm00.local ceph-mon[47668]: pgmap v2428: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:40 vm08.local ceph-mon[56824]: pgmap v2429: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:41.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:40 vm00.local ceph-mon[47668]: pgmap v2429: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:42.782 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:42.783 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:42.812 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:42.813 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:43.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:43 vm08.local ceph-mon[56824]: pgmap v2430: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:43.902 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:43 vm00.local ceph-mon[47668]: pgmap v2430: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:45.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:45 vm08.local ceph-mon[56824]: pgmap v2431: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:45 vm00.local ceph-mon[47668]: pgmap v2431: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:47.814 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:47.815 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:47.842 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:47.842 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:47.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:47 vm08.local ceph-mon[56824]: pgmap v2432: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:47 vm00.local ceph-mon[47668]: pgmap v2432: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:49.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:49 vm08.local ceph-mon[56824]: 
pgmap v2433: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:49 vm00.local ceph-mon[47668]: pgmap v2433: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:50.698 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:50.699 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:50.699 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:50 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:50.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:50.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:50.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:50 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:51 vm08.local ceph-mon[56824]: pgmap v2434: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:24:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:24:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:24:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:51 vm00.local ceph-mon[47668]: pgmap v2434: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:24:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:24:51.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:24:52.844 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:52.844 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 
2026-03-09T00:24:52.870 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:52.871 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:53 vm08.local ceph-mon[56824]: pgmap v2435: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:53.903 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:53 vm00.local ceph-mon[47668]: pgmap v2435: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:55 vm08.local ceph-mon[56824]: pgmap v2436: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:55 vm00.local ceph-mon[47668]: pgmap v2436: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:57.872 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:24:57.873 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:24:57.901 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:24:57.902 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:24:58.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:57 vm08.local ceph-mon[56824]: pgmap v2437: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:58.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:57 vm00.local ceph-mon[47668]: pgmap v2437: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:24:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:24:59 vm08.local ceph-mon[56824]: pgmap v2438: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:24:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:24:59 vm00.local ceph-mon[47668]: pgmap v2438: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:01.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:01 vm08.local ceph-mon[56824]: pgmap v2439: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:01 vm00.local ceph-mon[47668]: pgmap v2439: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:02.904 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:02.905 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:02.934 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:02.935 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:03 vm08.local ceph-mon[56824]: pgmap v2440: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:03.904 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:03 vm00.local 
ceph-mon[47668]: pgmap v2440: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:05.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:05 vm08.local ceph-mon[56824]: pgmap v2441: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:05.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:05 vm00.local ceph-mon[47668]: pgmap v2441: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:25:06.779 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:25:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:25:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:25:07.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:07 vm08.local ceph-mon[56824]: pgmap v2442: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:07 vm00.local ceph-mon[47668]: pgmap v2442: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:07.937 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:07.938 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:07.965 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:07.966 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:09.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:09 vm08.local ceph-mon[56824]: pgmap v2443: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:09 vm00.local ceph-mon[47668]: pgmap v2443: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:11.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:11 vm00.local ceph-mon[47668]: pgmap v2444: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:11 vm08.local ceph-mon[56824]: pgmap v2444: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:12.967 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:12.967 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:12.994 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:12.995 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:13.905 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:13 vm00.local ceph-mon[47668]: pgmap v2445: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:13 vm08.local ceph-mon[56824]: pgmap v2445: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:15.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:15 vm00.local ceph-mon[47668]: pgmap v2446: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:16.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:15 vm08.local ceph-mon[56824]: pgmap v2446: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:16.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:16 vm08.local ceph-mon[56824]: pgmap v2447: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:16.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:16 vm00.local ceph-mon[47668]: pgmap v2447: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:17.996 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:17.997 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:18.022 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:18.022 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:19 vm08.local ceph-mon[56824]: pgmap v2448: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:19 vm00.local ceph-mon[47668]: pgmap v2448: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:21 vm08.local ceph-mon[56824]: pgmap v2449: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:21 vm00.local ceph-mon[47668]: pgmap v2449: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:23.024 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:23.024 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:23.051 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:23.052 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:23.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:23 vm08.local ceph-mon[56824]: pgmap v2450: 97 pgs: 97 active+clean; 453 KiB 
data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:23.906 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:23 vm00.local ceph-mon[47668]: pgmap v2450: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:25.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:25 vm00.local ceph-mon[47668]: pgmap v2451: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:26.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:25 vm08.local ceph-mon[56824]: pgmap v2451: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:26.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:26 vm08.local ceph-mon[56824]: pgmap v2452: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:26.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:26 vm00.local ceph-mon[47668]: pgmap v2452: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:28.053 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:28.054 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:28.079 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:28.080 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:29 vm08.local ceph-mon[56824]: pgmap v2453: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:29 vm00.local ceph-mon[47668]: pgmap v2453: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:31 vm08.local ceph-mon[56824]: pgmap v2454: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:31 vm00.local ceph-mon[47668]: pgmap v2454: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:33.082 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:33.083 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:33.113 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:33.114 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:33 vm08.local ceph-mon[56824]: pgmap v2455: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:33.906 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:33 vm00.local ceph-mon[47668]: pgmap v2455: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:35.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:35 vm08.local ceph-mon[56824]: pgmap v2456: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB 
avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:35.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:35 vm00.local ceph-mon[47668]: pgmap v2456: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:37.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:37 vm08.local ceph-mon[56824]: pgmap v2457: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:37 vm00.local ceph-mon[47668]: pgmap v2457: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:38.115 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:38.116 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:38.141 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:38.142 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:39.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:39 vm00.local ceph-mon[47668]: pgmap v2458: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:40.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:39 vm08.local ceph-mon[56824]: pgmap v2458: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:41 vm00.local ceph-mon[47668]: pgmap v2459: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:42.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:41 vm08.local ceph-mon[56824]: pgmap v2459: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:43.144 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:43.144 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:43.171 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:43.171 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:43.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:43 vm00.local ceph-mon[47668]: pgmap v2460: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:44.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:43 vm08.local ceph-mon[56824]: pgmap v2460: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:45 vm00.local ceph-mon[47668]: pgmap v2461: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:46.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:45 vm08.local ceph-mon[56824]: pgmap v2461: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:47.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:46 vm08.local ceph-mon[56824]: pgmap v2462: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-09T00:25:47.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:46 vm00.local ceph-mon[47668]: pgmap v2462: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:48.173 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:48.174 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:48.202 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:48.203 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:49.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:49 vm08.local ceph-mon[56824]: pgmap v2463: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:49 vm00.local ceph-mon[47668]: pgmap v2463: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:51 vm08.local ceph-mon[56824]: pgmap v2464: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:25:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:25:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:25:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:25:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:25:51.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:25:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:51 vm00.local ceph-mon[47668]: pgmap v2464: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:25:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:25:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:25:51.930 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:25:51.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:25:51.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:25:53.205 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:53.205 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:53.231 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:53.231 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:53.908 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:53 vm00.local ceph-mon[47668]: pgmap v2465: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:54.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:53 vm08.local ceph-mon[56824]: pgmap v2465: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:55 vm00.local ceph-mon[47668]: pgmap v2466: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:56.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:55 vm08.local ceph-mon[56824]: pgmap v2466: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:56.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:56 vm08.local ceph-mon[56824]: pgmap v2467: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:56 vm00.local ceph-mon[47668]: pgmap v2467: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:25:58.233 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:25:58.233 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:25:58.470 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:25:58.483 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:25:59.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:25:58 vm00.local ceph-mon[47668]: pgmap v2468: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:25:59.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:25:58 vm08.local ceph-mon[56824]: pgmap v2468: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:02.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:01 vm08.local ceph-mon[56824]: pgmap v2469: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:02.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:01 vm00.local ceph-mon[47668]: pgmap v2469: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 
B/s wr, 0 op/s 2026-03-09T00:26:03.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:03 vm08.local ceph-mon[56824]: pgmap v2470: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:03 vm00.local ceph-mon[47668]: pgmap v2470: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:03.473 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:03.474 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:03.504 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:03.504 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:05.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:05 vm00.local ceph-mon[47668]: pgmap v2471: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:06.081 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:05 vm08.local ceph-mon[56824]: pgmap v2471: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:26:06.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:26:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:26:06.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:26:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:07 vm00.local ceph-mon[47668]: pgmap v2472: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:08.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:07 vm08.local ceph-mon[56824]: pgmap v2472: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:08.506 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:08.506 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:08.532 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:08.532 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:09 vm00.local ceph-mon[47668]: pgmap v2473: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 
2026-03-09T00:26:10.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:09 vm08.local ceph-mon[56824]: pgmap v2473: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:11.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:10 vm00.local ceph-mon[47668]: pgmap v2474: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:11.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:10 vm08.local ceph-mon[56824]: pgmap v2474: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:13.087 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:12 vm00.local ceph-mon[47668]: pgmap v2475: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:12 vm08.local ceph-mon[56824]: pgmap v2475: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:13.534 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:13.535 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:13.562 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:13.563 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:14 vm08.local ceph-mon[56824]: pgmap v2476: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:14 vm00.local ceph-mon[47668]: pgmap v2476: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:17 vm08.local ceph-mon[56824]: pgmap v2477: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:17 vm00.local ceph-mon[47668]: pgmap v2477: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:18.564 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:18.565 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:18.664 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:18.665 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:19 vm00.local ceph-mon[47668]: pgmap v2478: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:20.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:19 vm08.local ceph-mon[56824]: pgmap v2478: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:20.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:20 vm00.local ceph-mon[47668]: pgmap v2479: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:21.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:20 vm08.local ceph-mon[56824]: pgmap v2479: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:23.666 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:23.667 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:23.693 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:23.693 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:23 vm08.local ceph-mon[56824]: pgmap v2480: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:23.910 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:23 vm00.local ceph-mon[47668]: pgmap v2480: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:25.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:25 vm00.local ceph-mon[47668]: pgmap v2481: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:26.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:25 vm08.local ceph-mon[56824]: pgmap v2481: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:27 vm00.local ceph-mon[47668]: pgmap v2482: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:28.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:27 vm08.local ceph-mon[56824]: pgmap v2482: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:28.695 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:28.696 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:28.722 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:28.723 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:29 vm00.local ceph-mon[47668]: pgmap v2483: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:30.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:29 vm08.local ceph-mon[56824]: pgmap v2483: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:31 vm00.local ceph-mon[47668]: pgmap v2484: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:32.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:31 vm08.local ceph-mon[56824]: pgmap v2484: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:33.724 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:33.724 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:33.750 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-09T00:26:33.750 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:33.911 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:33 vm00.local ceph-mon[47668]: pgmap v2485: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:33 vm08.local ceph-mon[56824]: pgmap v2485: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:34 vm08.local ceph-mon[56824]: pgmap v2486: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:34 vm00.local ceph-mon[47668]: pgmap v2486: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:37.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:37 vm08.local ceph-mon[56824]: pgmap v2487: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:37.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:37 vm00.local ceph-mon[47668]: pgmap v2487: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:38.752 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:38.752 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:38.781 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:38.782 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:39.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:39 vm00.local ceph-mon[47668]: pgmap v2488: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:40.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:39 vm08.local ceph-mon[56824]: pgmap v2488: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:41 vm00.local ceph-mon[47668]: pgmap v2489: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:42.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:41 vm08.local ceph-mon[56824]: pgmap v2489: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:43.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:42 vm00.local ceph-mon[47668]: pgmap v2490: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:43.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:42 vm08.local ceph-mon[56824]: pgmap v2490: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:43.783 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:43.784 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:43.811 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:43.812 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:45.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:45 vm00.local ceph-mon[47668]: pgmap v2491: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:46.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:45 vm08.local ceph-mon[56824]: pgmap v2491: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:47 vm00.local ceph-mon[47668]: pgmap v2492: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:48.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:47 vm08.local ceph-mon[56824]: pgmap v2492: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:48.813 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:48.814 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:48.840 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:48.841 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:50.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:49 vm08.local ceph-mon[56824]: pgmap v2493: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:50.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:49 vm00.local ceph-mon[47668]: pgmap v2493: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:51.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:50 vm08.local ceph-mon[56824]: pgmap v2494: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:51.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:50 vm00.local ceph-mon[47668]: pgmap v2494: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:51.764 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:26:51.764 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:26:51.764 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:26:51.764 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:26:51.764 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:51 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:26:51.764 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:51 vm00.local 
ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:26:52.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:26:52.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:26:52.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:26:52.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:26:52.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:26:52.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:51 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:26:53.086 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:52 vm00.local ceph-mon[47668]: pgmap v2495: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:53.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:52 vm08.local ceph-mon[56824]: pgmap v2495: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:53.842 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:53.843 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:53.869 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:53.870 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:55 vm00.local ceph-mon[47668]: pgmap v2496: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:56.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:55 vm08.local ceph-mon[56824]: pgmap v2496: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:26:56.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:56 vm08.local ceph-mon[56824]: pgmap v2497: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:57.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:56 vm00.local ceph-mon[47668]: pgmap v2497: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:26:58.871 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:26:58.872 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:26:58.898 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:26:58.899 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:26:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:26:59 vm00.local ceph-mon[47668]: pgmap v2498: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:26:59 vm08.local ceph-mon[56824]: pgmap v2498: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:01 vm00.local ceph-mon[47668]: pgmap v2499: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:02.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:01 vm08.local ceph-mon[56824]: pgmap v2499: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:03.088 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:02 vm00.local ceph-mon[47668]: pgmap v2500: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:02 vm08.local ceph-mon[56824]: pgmap v2500: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:03.900 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:03.901 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:03.935 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:03.936 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:05.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:04 vm08.local ceph-mon[56824]: pgmap v2501: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:04 vm00.local ceph-mon[47668]: pgmap v2501: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:27:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:27:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:27:06.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:27:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:06 vm08.local ceph-mon[56824]: pgmap 
v2502: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:06 vm00.local ceph-mon[47668]: pgmap v2502: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:08.938 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:08.938 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:08.972 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:08.972 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:09 vm00.local ceph-mon[47668]: pgmap v2503: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:10.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:09 vm08.local ceph-mon[56824]: pgmap v2503: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:11.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:11 vm00.local ceph-mon[47668]: pgmap v2504: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:11 vm08.local ceph-mon[56824]: pgmap v2504: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:13.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:12 vm08.local ceph-mon[56824]: pgmap v2505: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:13.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:12 vm00.local ceph-mon[47668]: pgmap v2505: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:13.974 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:13.974 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:14.002 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:14.002 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:15.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:15 vm00.local ceph-mon[47668]: pgmap v2506: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:16.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:15 vm08.local ceph-mon[56824]: pgmap v2506: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:16.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:16 vm08.local ceph-mon[56824]: pgmap v2507: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:17.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:16 vm00.local ceph-mon[47668]: pgmap v2507: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:19.004 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:19.005 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t 
nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:19.031 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:19.031 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:19.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:19 vm08.local ceph-mon[56824]: pgmap v2508: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:19 vm00.local ceph-mon[47668]: pgmap v2508: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:21 vm08.local ceph-mon[56824]: pgmap v2509: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:21 vm00.local ceph-mon[47668]: pgmap v2509: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:23.916 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:23 vm00.local ceph-mon[47668]: pgmap v2510: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:24.033 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:24.033 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:24.060 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:24.060 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:24.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:23 vm08.local ceph-mon[56824]: pgmap v2510: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:24 vm08.local ceph-mon[56824]: pgmap v2511: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:24 vm00.local ceph-mon[47668]: pgmap v2511: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:27 vm08.local ceph-mon[56824]: pgmap v2512: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:27 vm00.local ceph-mon[47668]: pgmap v2512: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:29.062 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:29.062 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:29.091 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:29.092 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:29 vm08.local ceph-mon[56824]: pgmap v2513: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:29.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:29 vm00.local ceph-mon[47668]: pgmap v2513: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:31 vm08.local ceph-mon[56824]: pgmap v2514: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:31 vm00.local ceph-mon[47668]: pgmap v2514: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:33 vm08.local ceph-mon[56824]: pgmap v2515: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:33.918 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:33 vm00.local ceph-mon[47668]: pgmap v2515: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:34.094 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:34.094 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:34.121 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:34.122 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:34 vm08.local ceph-mon[56824]: pgmap v2516: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:34 vm00.local ceph-mon[47668]: pgmap v2516: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:37.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:37 vm08.local ceph-mon[56824]: pgmap v2517: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:37 vm00.local ceph-mon[47668]: pgmap v2517: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:39.038 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:38 vm00.local ceph-mon[47668]: pgmap v2518: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:39.123 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:39.124 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:38 vm08.local ceph-mon[56824]: pgmap v2518: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:39.156 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:39.157 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:41 vm08.local ceph-mon[56824]: pgmap v2519: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:41.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:41 vm00.local ceph-mon[47668]: pgmap v2519: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:43.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:43 vm08.local ceph-mon[56824]: pgmap v2520: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:43.919 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:43 vm00.local ceph-mon[47668]: pgmap v2520: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:44.158 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:44.159 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:44.184 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:44.185 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:44 vm08.local ceph-mon[56824]: pgmap v2521: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:44 vm00.local ceph-mon[47668]: pgmap v2521: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:47 vm08.local ceph-mon[56824]: pgmap v2522: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:47 vm00.local ceph-mon[47668]: pgmap v2522: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:49.186 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:49.187 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:49.214 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:49.215 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:49.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:49 vm08.local ceph-mon[56824]: pgmap v2523: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:49 vm00.local ceph-mon[47668]: pgmap v2523: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:51.799 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:51 vm08.local ceph-mon[56824]: pgmap v2524: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:51.804 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:51 vm00.local ceph-mon[47668]: pgmap v2524: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:52.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:52 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:27:52.628 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:52 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:27:52.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:52 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:27:52.731 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:52 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:27:52.731 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:52 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:27:52.731 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:52 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:27:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:53 vm00.local ceph-mon[47668]: pgmap v2525: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:53 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:27:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:53 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:27:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:53 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:27:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:53 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:27:53.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:53 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:27:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:53 vm08.local ceph-mon[56824]: pgmap v2525: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:53 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:27:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:53 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:27:53.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:53 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:27:53.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:53 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:27:53.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:53 vm08.local 
ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:27:54.216 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:54.217 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:54.242 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:54.242 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:55.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:54 vm08.local ceph-mon[56824]: pgmap v2526: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:54 vm00.local ceph-mon[47668]: pgmap v2526: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:57 vm08.local ceph-mon[56824]: pgmap v2527: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:57 vm00.local ceph-mon[47668]: pgmap v2527: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:27:59.243 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:27:59.244 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:27:59.269 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:27:59.270 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:27:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:27:59 vm08.local ceph-mon[56824]: pgmap v2528: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:27:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:27:59 vm00.local ceph-mon[47668]: pgmap v2528: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:01.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:01 vm08.local ceph-mon[56824]: pgmap v2529: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:01 vm00.local ceph-mon[47668]: pgmap v2529: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:03.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:03 vm08.local ceph-mon[56824]: pgmap v2530: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:03.921 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:03 vm00.local ceph-mon[47668]: pgmap v2530: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:04.271 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:04.271 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:04.298 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:04.298 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:05.127 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:04 vm08.local ceph-mon[56824]: pgmap v2531: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:04 vm00.local ceph-mon[47668]: pgmap v2531: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:28:06.029 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:28:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:28:06.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:28:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:06 vm08.local ceph-mon[56824]: pgmap v2532: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:06 vm00.local ceph-mon[47668]: pgmap v2532: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:09.300 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:09.300 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:09.328 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:09.329 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:09.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:09 vm08.local ceph-mon[56824]: pgmap v2533: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:09 vm00.local ceph-mon[47668]: pgmap v2533: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:11 vm08.local ceph-mon[56824]: pgmap v2534: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:11.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:11 vm00.local ceph-mon[47668]: pgmap v2534: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:13.707 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:13 vm00.local ceph-mon[47668]: pgmap v2535: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:13 vm08.local ceph-mon[56824]: pgmap v2535: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:14.331 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:14.332 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:14.359 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:14.359 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:15.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:14 vm08.local ceph-mon[56824]: pgmap v2536: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:14 vm00.local ceph-mon[47668]: pgmap v2536: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:17 vm08.local ceph-mon[56824]: pgmap v2537: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:17 vm00.local ceph-mon[47668]: pgmap v2537: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:19.361 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:19.361 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:19.388 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:19.389 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:19 vm08.local ceph-mon[56824]: pgmap v2538: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:19 vm00.local ceph-mon[47668]: pgmap v2538: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:21 vm08.local ceph-mon[56824]: pgmap v2539: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:21 vm00.local ceph-mon[47668]: pgmap v2539: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:23.849 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:23 vm00.local ceph-mon[47668]: pgmap v2540: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:23 vm08.local ceph-mon[56824]: pgmap v2540: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:24.390 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:24.391 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:24.486 
INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:24.486 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:25 vm08.local ceph-mon[56824]: pgmap v2541: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:25.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:25 vm00.local ceph-mon[47668]: pgmap v2541: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:27 vm08.local ceph-mon[56824]: pgmap v2542: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:27 vm00.local ceph-mon[47668]: pgmap v2542: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:29.488 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:29.488 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:29.589 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:29.589 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:29 vm08.local ceph-mon[56824]: pgmap v2543: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:29 vm00.local ceph-mon[47668]: pgmap v2543: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:31 vm08.local ceph-mon[56824]: pgmap v2544: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:31 vm00.local ceph-mon[47668]: pgmap v2544: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:33 vm08.local ceph-mon[56824]: pgmap v2545: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:33.923 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:33 vm00.local ceph-mon[47668]: pgmap v2545: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:34.591 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:34.591 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:34.622 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:34.623 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:34 vm08.local ceph-mon[56824]: pgmap v2546: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:34 vm00.local ceph-mon[47668]: pgmap 
v2546: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:37 vm08.local ceph-mon[56824]: pgmap v2547: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:37 vm00.local ceph-mon[47668]: pgmap v2547: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:39.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:38 vm08.local ceph-mon[56824]: pgmap v2548: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:39.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:38 vm00.local ceph-mon[47668]: pgmap v2548: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:39.624 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:39.625 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:39.651 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:39.651 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:41 vm08.local ceph-mon[56824]: pgmap v2549: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:41 vm00.local ceph-mon[47668]: pgmap v2549: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:43 vm08.local ceph-mon[56824]: pgmap v2550: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:43.924 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:43 vm00.local ceph-mon[47668]: pgmap v2550: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:44.652 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:44.653 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:44.679 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:44.680 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:44 vm08.local ceph-mon[56824]: pgmap v2551: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:44 vm00.local ceph-mon[47668]: pgmap v2551: 97 pgs: 97 active+clean; 453 KiB data, 82 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:47 vm08.local ceph-mon[56824]: pgmap v2552: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:47 vm00.local ceph-mon[47668]: pgmap v2552: 97 pgs: 97 active+clean; 453 KiB 
data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:49.681 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:49.681 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:49.708 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:49.708 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:49 vm08.local ceph-mon[56824]: pgmap v2553: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:49 vm00.local ceph-mon[47668]: pgmap v2553: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:51.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:51 vm08.local ceph-mon[56824]: pgmap v2554: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:51 vm00.local ceph-mon[47668]: pgmap v2554: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:53.699 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:53 vm00.local ceph-mon[47668]: pgmap v2555: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:53.699 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:53 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:28:53.699 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:53 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:28:53.699 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:53 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:28:53.709 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:53 vm08.local ceph-mon[56824]: pgmap v2555: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:53.709 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:53 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:28:53.709 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:53 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:28:53.709 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:53 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:28:54.710 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:54.710 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:54.737 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: 
mount system call failed 2026-03-09T00:28:54.738 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:55 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:28:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:55 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:28:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:55 vm08.local ceph-mon[56824]: pgmap v2556: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:28:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:28:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:54 vm00.local ceph-mon[47668]: pgmap v2556: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:57 vm08.local ceph-mon[56824]: pgmap v2557: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:57 vm00.local ceph-mon[47668]: pgmap v2557: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:28:59.739 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:28:59.739 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:28:59.766 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:28:59.766 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:28:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:28:59 vm08.local ceph-mon[56824]: pgmap v2558: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:28:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:28:59 vm00.local ceph-mon[47668]: pgmap v2558: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:01 vm00.local ceph-mon[47668]: pgmap v2559: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:02.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:01 vm08.local ceph-mon[56824]: pgmap v2559: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:02.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:02 vm00.local ceph-mon[47668]: pgmap v2560: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:03.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:02 vm08.local ceph-mon[56824]: pgmap v2560: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:04.768 
INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:04.768 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:04.795 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:04.795 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:05.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:04 vm08.local ceph-mon[56824]: pgmap v2561: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:04 vm00.local ceph-mon[47668]: pgmap v2561: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:06.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:29:06.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:29:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:29:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:29:07.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:06 vm08.local ceph-mon[56824]: pgmap v2562: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:06 vm00.local ceph-mon[47668]: pgmap v2562: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:09.797 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:09.797 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:09 vm00.local ceph-mon[47668]: pgmap v2563: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:09.947 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:09.948 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:10.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:09 vm08.local ceph-mon[56824]: pgmap v2563: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:11 vm08.local ceph-mon[56824]: pgmap v2564: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:12.179 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:11 vm00.local ceph-mon[47668]: pgmap v2564: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:13.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:12 vm00.local ceph-mon[47668]: pgmap v2565: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:13.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:12 vm08.local ceph-mon[56824]: pgmap v2565: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:14.949 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:14.950 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:15.346 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:15.346 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:14 vm08.local ceph-mon[56824]: pgmap v2566: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:15.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:14 vm00.local ceph-mon[47668]: pgmap v2566: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:17 vm08.local ceph-mon[56824]: pgmap v2567: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:17 vm00.local ceph-mon[47668]: pgmap v2567: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:19 vm08.local ceph-mon[56824]: pgmap v2568: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:19 vm00.local ceph-mon[47668]: pgmap v2568: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:20.347 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:20.348 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:20.374 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:20.374 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:21.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:21 vm08.local ceph-mon[56824]: pgmap v2569: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:21 vm00.local ceph-mon[47668]: pgmap v2569: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:23 vm08.local ceph-mon[56824]: pgmap v2570: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:29:23.927 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:23 vm00.local ceph-mon[47668]: pgmap v2570: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:29:25.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:24 vm08.local ceph-mon[56824]: pgmap v2571: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:29:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:24 vm00.local ceph-mon[47668]: pgmap v2571: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:29:25.376 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:25.377 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:25.403 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:25.404 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:27 vm08.local ceph-mon[56824]: pgmap v2572: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:27 vm00.local ceph-mon[47668]: pgmap v2572: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:29 vm08.local ceph-mon[56824]: pgmap v2573: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:29:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:29 vm00.local ceph-mon[47668]: pgmap v2573: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 85 B/s wr, 0 op/s 2026-03-09T00:29:30.405 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:30.405 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:30.495 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:30.495 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:31 vm08.local ceph-mon[56824]: pgmap v2574: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:31 vm00.local ceph-mon[47668]: pgmap v2574: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:33 vm08.local ceph-mon[56824]: pgmap v2575: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:33.928 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:33 vm00.local ceph-mon[47668]: pgmap v2575: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:35.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:34 vm08.local ceph-mon[56824]: pgmap v2576: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 
09 00:29:34 vm00.local ceph-mon[47668]: pgmap v2576: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:35.497 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:35.497 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:35.524 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:35.524 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:37.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:36 vm08.local ceph-mon[56824]: pgmap v2577: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:37.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:36 vm00.local ceph-mon[47668]: pgmap v2577: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:40.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:39 vm08.local ceph-mon[56824]: pgmap v2578: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:40.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:39 vm00.local ceph-mon[47668]: pgmap v2578: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:40.525 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:40.526 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:40.552 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:40.553 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:41.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:40 vm08.local ceph-mon[56824]: pgmap v2579: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:41.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:40 vm00.local ceph-mon[47668]: pgmap v2579: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:43 vm08.local ceph-mon[56824]: pgmap v2580: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:43.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:43 vm00.local ceph-mon[47668]: pgmap v2580: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:45.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:44 vm08.local ceph-mon[56824]: pgmap v2581: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:44 vm00.local ceph-mon[47668]: pgmap v2581: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:45.554 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:45.555 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:45.582 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:45.582 
INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:47 vm08.local ceph-mon[56824]: pgmap v2582: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:47 vm00.local ceph-mon[47668]: pgmap v2582: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:49.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:48 vm08.local ceph-mon[56824]: pgmap v2583: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:49.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:48 vm00.local ceph-mon[47668]: pgmap v2583: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:50.584 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:50.584 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:50.611 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:50.612 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:51.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:51 vm08.local ceph-mon[56824]: pgmap v2584: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:51 vm00.local ceph-mon[47668]: pgmap v2584: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:53.829 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:53 vm08.local ceph-mon[56824]: pgmap v2585: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:53.838 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:53 vm00.local ceph-mon[47668]: pgmap v2585: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:29:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:29:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:29:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:29:54.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:29:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": 
"config dump", "format": "json"}]: dispatch 2026-03-09T00:29:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:29:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:29:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:29:54.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:29:55.613 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:29:55.613 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:29:55.639 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:29:55.639 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:29:55.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:55 vm08.local ceph-mon[56824]: pgmap v2586: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:55.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:55 vm00.local ceph-mon[47668]: pgmap v2586: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:29:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:57 vm08.local ceph-mon[56824]: pgmap v2587: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:57 vm00.local ceph-mon[47668]: pgmap v2587: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:29:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:29:59 vm00.local ceph-mon[47668]: pgmap v2588: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:00.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:29:59 vm08.local ceph-mon[56824]: pgmap v2588: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:00.641 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:00.641 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:00.672 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:00.672 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:01.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:00 vm08.local ceph-mon[56824]: overall HEALTH_OK 2026-03-09T00:30:01.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:00 vm00.local ceph-mon[47668]: overall HEALTH_OK 2026-03-09T00:30:02.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:01 vm08.local ceph-mon[56824]: pgmap v2589: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:02.180 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:01 vm00.local ceph-mon[47668]: pgmap v2589: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:03.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:03 vm00.local ceph-mon[47668]: pgmap v2590: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:03.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:03 vm08.local ceph-mon[56824]: pgmap v2590: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:05.627 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:05 vm08.local ceph-mon[56824]: pgmap v2591: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:05.674 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:05.674 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:05.680 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:05 vm00.local ceph-mon[47668]: pgmap v2591: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:05.783 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:05.784 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:06.529 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:30:06.529 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:06 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:30:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:30:06.679 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:06 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:30:07.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:07 vm08.local ceph-mon[56824]: pgmap v2592: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:07.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:07 vm00.local ceph-mon[47668]: pgmap v2592: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:09.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:09 vm08.local ceph-mon[56824]: pgmap v2593: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:09 vm00.local ceph-mon[47668]: pgmap v2593: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:10.785 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:10.786 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:10.815 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:10.815 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:11 vm08.local ceph-mon[56824]: pgmap v2594: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:11.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:11 vm00.local ceph-mon[47668]: pgmap v2594: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:13 vm08.local ceph-mon[56824]: pgmap v2595: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:13 vm00.local ceph-mon[47668]: pgmap v2595: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:15.128 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:14 vm08.local ceph-mon[56824]: pgmap v2596: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:15.180 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:14 vm00.local ceph-mon[47668]: pgmap v2596: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:15.817 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:15.817 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:15.843 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:15.843 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:17 vm08.local ceph-mon[56824]: pgmap v2597: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:17 vm00.local ceph-mon[47668]: pgmap v2597: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:19 vm08.local ceph-mon[56824]: pgmap v2598: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:19 vm00.local ceph-mon[47668]: pgmap v2598: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:20.844 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:20.845 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:20.871 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:20.872 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:21 vm08.local 
ceph-mon[56824]: pgmap v2599: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:21 vm00.local ceph-mon[47668]: pgmap v2599: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:23 vm08.local ceph-mon[56824]: pgmap v2600: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:23.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:23 vm00.local ceph-mon[47668]: pgmap v2600: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:24 vm00.local ceph-mon[47668]: pgmap v2601: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:24 vm08.local ceph-mon[56824]: pgmap v2601: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:25.873 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:25.874 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:25.901 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:25.902 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:27 vm08.local ceph-mon[56824]: pgmap v2602: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:27 vm00.local ceph-mon[47668]: pgmap v2602: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:29 vm08.local ceph-mon[56824]: pgmap v2603: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:29 vm00.local ceph-mon[47668]: pgmap v2603: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:30.903 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:30.904 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:30.931 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:30.932 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:31 vm08.local ceph-mon[56824]: pgmap v2604: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:31 vm00.local ceph-mon[47668]: pgmap v2604: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:33.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:33 vm08.local ceph-mon[56824]: pgmap v2605: 97 pgs: 97 
active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:33.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:33 vm00.local ceph-mon[47668]: pgmap v2605: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:34 vm00.local ceph-mon[47668]: pgmap v2606: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:34 vm08.local ceph-mon[56824]: pgmap v2606: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:35.934 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:35.934 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:35.960 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:35.961 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:37 vm08.local ceph-mon[56824]: pgmap v2607: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:37 vm00.local ceph-mon[47668]: pgmap v2607: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:39 vm08.local ceph-mon[56824]: pgmap v2608: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:39.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:39 vm00.local ceph-mon[47668]: pgmap v2608: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:40.962 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:40.963 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:40.988 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:40.989 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:41 vm08.local ceph-mon[56824]: pgmap v2609: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:41 vm00.local ceph-mon[47668]: pgmap v2609: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:43 vm08.local ceph-mon[56824]: pgmap v2610: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:43.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:43 vm00.local ceph-mon[47668]: pgmap v2610: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:44 vm00.local ceph-mon[47668]: pgmap v2611: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 
160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:44 vm08.local ceph-mon[56824]: pgmap v2611: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:45.990 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:45.991 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:46.019 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:46.019 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:47 vm08.local ceph-mon[56824]: pgmap v2612: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:47 vm00.local ceph-mon[47668]: pgmap v2612: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:49 vm08.local ceph-mon[56824]: pgmap v2613: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:49 vm00.local ceph-mon[47668]: pgmap v2613: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:51.020 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:51.021 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:51.047 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:51.048 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:51.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:51 vm08.local ceph-mon[56824]: pgmap v2614: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:51 vm00.local ceph-mon[47668]: pgmap v2614: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:53 vm08.local ceph-mon[56824]: pgmap v2615: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:53.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:53 vm00.local ceph-mon[47668]: pgmap v2615: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:30:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:30:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": 
"auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:30:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:30:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:54 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:30:55.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:54 vm00.local ceph-mon[47668]: pgmap v2616: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:55.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:30:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:30:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:30:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:30:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:54 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:30:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:54 vm08.local ceph-mon[56824]: pgmap v2616: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:56.049 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:30:56.049 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:30:56.075 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:30:56.076 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:30:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:57 vm08.local ceph-mon[56824]: pgmap v2617: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:57 vm00.local ceph-mon[47668]: pgmap v2617: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:30:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:30:59 vm08.local ceph-mon[56824]: pgmap v2618: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:30:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:30:59 vm00.local ceph-mon[47668]: pgmap v2618: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:01.077 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:01.078 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:01.103 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 
2026-03-09T00:31:01.104 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:01.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:01 vm08.local ceph-mon[56824]: pgmap v2619: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:01 vm00.local ceph-mon[47668]: pgmap v2619: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:03 vm08.local ceph-mon[56824]: pgmap v2620: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:03 vm00.local ceph-mon[47668]: pgmap v2620: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:04 vm00.local ceph-mon[47668]: pgmap v2621: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:04 vm08.local ceph-mon[56824]: pgmap v2621: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:06.105 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:06.106 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:06.135 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:06.135 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:31:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:31:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:31:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:31:07.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:06 vm00.local ceph-mon[47668]: pgmap v2622: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:06 vm08.local ceph-mon[56824]: pgmap v2622: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:09.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:09 vm08.local 
ceph-mon[56824]: pgmap v2623: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:09 vm00.local ceph-mon[47668]: pgmap v2623: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:11.136 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:11.137 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:11.162 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:11.163 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:11.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:11 vm08.local ceph-mon[56824]: pgmap v2624: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:11.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:11 vm00.local ceph-mon[47668]: pgmap v2624: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:13.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:13 vm08.local ceph-mon[56824]: pgmap v2625: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:13.930 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:13 vm00.local ceph-mon[47668]: pgmap v2625: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:15.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:14 vm00.local ceph-mon[47668]: pgmap v2626: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:14 vm08.local ceph-mon[56824]: pgmap v2626: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:16.164 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:16.165 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:16.193 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:16.193 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:17 vm08.local ceph-mon[56824]: pgmap v2627: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:17 vm00.local ceph-mon[47668]: pgmap v2627: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:19 vm08.local ceph-mon[56824]: pgmap v2628: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:19 vm00.local ceph-mon[47668]: pgmap v2628: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:21.195 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:21.195 
INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:21.220 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:21.221 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:21.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:21 vm08.local ceph-mon[56824]: pgmap v2629: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:21 vm00.local ceph-mon[47668]: pgmap v2629: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:23.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:23 vm08.local ceph-mon[56824]: pgmap v2630: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:23.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:23 vm00.local ceph-mon[47668]: pgmap v2630: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:25.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:24 vm00.local ceph-mon[47668]: pgmap v2631: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:25.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:24 vm08.local ceph-mon[56824]: pgmap v2631: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:26.222 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:26.223 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:26.250 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:26.250 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:27.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:27 vm08.local ceph-mon[56824]: pgmap v2632: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:27.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:27 vm00.local ceph-mon[47668]: pgmap v2632: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:29.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:29 vm08.local ceph-mon[56824]: pgmap v2633: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:29.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:29 vm00.local ceph-mon[47668]: pgmap v2633: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:31.252 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:31.252 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:31.278 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:31.279 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:31.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:31 vm08.local ceph-mon[56824]: pgmap v2634: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 
2026-03-09T00:31:31.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:31 vm00.local ceph-mon[47668]: pgmap v2634: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:33.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:33 vm00.local ceph-mon[47668]: pgmap v2635: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:34.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:33 vm08.local ceph-mon[56824]: pgmap v2635: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:35.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:34 vm00.local ceph-mon[47668]: pgmap v2636: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:35.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:34 vm08.local ceph-mon[56824]: pgmap v2636: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:36.280 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:36.281 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:36.307 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:36.307 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:37.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:37 vm08.local ceph-mon[56824]: pgmap v2637: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:37.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:37 vm00.local ceph-mon[47668]: pgmap v2637: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:39.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:39 vm08.local ceph-mon[56824]: pgmap v2638: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:39.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:39 vm00.local ceph-mon[47668]: pgmap v2638: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:41.308 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:41.309 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:41.336 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:41.336 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:41.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:41 vm08.local ceph-mon[56824]: pgmap v2639: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:41.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:41 vm00.local ceph-mon[47668]: pgmap v2639: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:43.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:43 vm08.local ceph-mon[56824]: pgmap v2640: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:43.929 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:43 vm00.local ceph-mon[47668]: pgmap v2640: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:45.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:44 vm00.local ceph-mon[47668]: pgmap v2641: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:45.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:44 vm08.local ceph-mon[56824]: pgmap v2641: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:46.337 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:46.338 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:46.364 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:46.364 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:47.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:47 vm08.local ceph-mon[56824]: pgmap v2642: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:47.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:47 vm00.local ceph-mon[47668]: pgmap v2642: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:49.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:49 vm08.local ceph-mon[56824]: pgmap v2643: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:49.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:49 vm00.local ceph-mon[47668]: pgmap v2643: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:51.366 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:51.367 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:51.393 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:51.394 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:51.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:51 vm08.local ceph-mon[56824]: pgmap v2644: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:51.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:51 vm00.local ceph-mon[47668]: pgmap v2644: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:53.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:53 vm08.local ceph-mon[56824]: pgmap v2645: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:53.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:53 vm00.local ceph-mon[47668]: pgmap v2645: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:55 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:31:55.378 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:55 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:31:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:55 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:31:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:55 vm08.local ceph-mon[56824]: pgmap v2646: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:55 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:31:55.378 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:55 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:31:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:55 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:31:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:55 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:31:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:55 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:31:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:55 vm00.local ceph-mon[47668]: pgmap v2646: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:55 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:31:55.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:55 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' 2026-03-09T00:31:56.395 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:31:56.396 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:31:56.423 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:31:56.423 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:31:57.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:57 vm08.local ceph-mon[56824]: pgmap v2647: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:57.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:57 vm00.local ceph-mon[47668]: pgmap v2647: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:31:59.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:31:59 vm08.local ceph-mon[56824]: pgmap v2648: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:31:59.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:31:59 vm00.local ceph-mon[47668]: pgmap v2648: 
97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:01.424 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:32:01.425 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:32:01.451 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:32:01.451 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:32:01.878 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:01 vm08.local ceph-mon[56824]: pgmap v2649: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:01.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:01 vm00.local ceph-mon[47668]: pgmap v2649: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:03.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:03 vm08.local ceph-mon[56824]: pgmap v2650: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:03.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:03 vm00.local ceph-mon[47668]: pgmap v2650: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:05.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:04 vm00.local ceph-mon[47668]: pgmap v2651: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:05.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:04 vm08.local ceph-mon[56824]: pgmap v2651: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:32:06.179 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:05 vm00.local ceph-mon[47668]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:32:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:32:06.279 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:05 vm08.local ceph-mon[56824]: from='mgr.14236 192.168.123.100:0/178761586' entity='mgr.vm00.pkgtpt' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.pkgtpt/trash_purge_schedule"}]: dispatch 2026-03-09T00:32:06.452 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:32:06.453 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:32:06.480 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:32:06.481 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:32:07.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:06 vm08.local ceph-mon[56824]: pgmap v2652: 97 pgs: 97 active+clean; 453 
KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:07.429 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:06 vm00.local ceph-mon[47668]: pgmap v2652: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:09.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:09 vm08.local ceph-mon[56824]: pgmap v2653: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:09.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:09 vm00.local ceph-mon[47668]: pgmap v2653: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:11.482 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:32:11.483 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:32:11.651 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:32:11.652 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:32:11.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:11 vm00.local ceph-mon[47668]: pgmap v2654: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:12.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:11 vm08.local ceph-mon[56824]: pgmap v2654: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:13.696 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:13 vm00.local ceph-mon[47668]: pgmap v2655: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:14.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:13 vm08.local ceph-mon[56824]: pgmap v2655: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:15.377 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:14 vm08.local ceph-mon[56824]: pgmap v2656: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:15.430 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:14 vm00.local ceph-mon[47668]: pgmap v2656: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:16.654 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:32:16.655 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:32:16.685 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:32:16.685 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:32:17.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:17 vm08.local ceph-mon[56824]: pgmap v2657: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:17.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:17 vm00.local ceph-mon[47668]: pgmap v2657: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:19.877 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:19 vm08.local ceph-mon[56824]: pgmap v2658: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB 
avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:19.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:19 vm00.local ceph-mon[47668]: pgmap v2658: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 170 B/s wr, 0 op/s 2026-03-09T00:32:21.686 INFO:teuthology.orchestra.run.vm00.stderr:++ hostname 2026-03-09T00:32:21.687 INFO:teuthology.orchestra.run.vm00.stderr:+ mount -t nfs vm00.local:/fake /mnt/foo -o sync 2026-03-09T00:32:21.715 INFO:teuthology.orchestra.run.vm00.stderr:mount.nfs: mount system call failed 2026-03-09T00:32:21.716 INFO:teuthology.orchestra.run.vm00.stderr:+ sleep 5 2026-03-09T00:32:21.929 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Mar 09 00:32:21 vm00.local ceph-mon[47668]: pgmap v2659: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:22.127 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 09 00:32:21 vm08.local ceph-mon[56824]: pgmap v2659: 97 pgs: 97 active+clean; 453 KiB data, 83 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 255 B/s wr, 0 op/s 2026-03-09T00:32:22.382 DEBUG:teuthology.exit:Got signal 15; running 1 handler... 2026-03-09T00:32:22.383 DEBUG:teuthology.exit:Finished running handlers
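
The stderr trace above repeats the same shell loop roughly every five seconds: hostname is expanded, 'mount -t nfs vm00.local:/fake /mnt/foo -o sync' is attempted, it fails with 'mount.nfs: mount system call failed', and the loop sleeps 5 seconds before retrying. No attempt succeeds before the run receives signal 15 at 00:32:22 and the exit handlers finish, so the job ends while still stuck in this loop. Below is a minimal sketch of the same retry pattern with a bounded attempt count; the MAX_TRIES name, the value 60, and the explicit failure exit are illustrative assumptions and are not part of the loop visible in the trace.

    #!/usr/bin/env bash
    # Hypothetical bounded variant of the mount retry loop seen in the trace:
    # attempt the NFS mount every 5 seconds, but give up after MAX_TRIES
    # attempts instead of retrying until an external SIGTERM arrives.
    MAX_TRIES=60
    tries=0
    while ! mount -t nfs "$(hostname)":/fake /mnt/foo -o sync; do
        tries=$((tries + 1))
        if [ "$tries" -ge "$MAX_TRIES" ]; then
            echo "NFS mount of /fake still failing after ${tries} attempts" >&2
            exit 1
        fi
        sleep 5
    done

With a cap like this the workload step would fail on its own with a clear error once the export stays unmountable, rather than looping until the surrounding job is killed.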